repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
tkaitchuck/nupic
|
py/nupic/research/TP_R.py
|
1
|
127081
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# HI EVERYONE!
import numpy
from numpy import *
import sys
import time
import cPickle as pickle
from itertools import product
import pprint
import copy
import random
import nupic.math
from nupic.support.consoleprinter import ConsolePrinterMixin
from nupic.bindings.math import Random
from nupic.bindings.algorithms import isSegmentActive, getSegmentActivityLevel
from nupic.research.TrivialPredictor import TrivialPredictor
# Default verbosity while running unit tests
VERBOSITY = 0

# The numpy equivalent to the floating point type used by NTA.
# GetNTAReal() returns the numpy dtype matching the C++ Real type.
dtype = nupic.math.GetNTAReal()
class TP_R(ConsolePrinterMixin):
"""Class implementing the temporal pooler algorithm as described in the
published Cortical Learning Algorithm documentation. The implementation here
attempts to closely match the pseudocode in the documentation. This
implementation does contain several additional bells and whistles such as
a column confidence measure.
"""
##############################################################################
# todo: Have some higher level flags for fast learning, HiLo, Pooling, etc.
def __init__(self,
             numberOfCols = 500,
             cellsPerColumn = 10,
             initialPerm = 0.11,
             connectedPerm = 0.50,
             minThreshold = 8,
             newSynapseCount = 15,
             permanenceInc = 0.10,
             permanenceDec = 0.10,
             permanenceMax = 1.0,
             globalDecay = 0.10,
             activationThreshold = 12,
             doPooling = False, # allows to turn off pooling
             segUpdateValidDuration = 5,
             burnIn = 2, # Used for evaluating the prediction score
             collectStats = False, # If true, collect training and inference stats
             seed = 42,
             verbosity = VERBOSITY,
             checkSynapseConsistency = False, # for compatibility with cpp only -- ignored
             # List (as string) of trivial predictions to compute alongside
             # the full TP. See TrivialPredictor.py for a list of allowed methods
             trivialPredictionMethods = '',
             pamLength = 1,
             maxInfBacktrack = 10,
             maxLrnBacktrack = 5,
             maxAge = 100000,
             maxSeqLength = 32,
             # Fixed size mode params
             maxSegmentsPerCell = -1,
             maxSynapsesPerSegment = -1,
             # Output type
             outputType = 'normal',
             ):
    """ Construct the TP.

    TODO: Document other constructor parameters.

    Parameters:
    -------------------------------------------------------------------
    ....
    pamLength:   Number of time steps to remain in "Pay Attention Mode" after
                 we detect we've reached the end of a learned sequence. Setting
                 this to 0 disables PAM mode. When we are in PAM mode, we do
                 not burst unpredicted columns during learning, which in turn
                 prevents us from falling into a previously learned sequence
                 for a while (until we run through another 'pamLength' steps).
                 The advantage of PAM mode is that it requires fewer
                 presentations to learn a set of sequences which share
                 elements. The disadvantage of PAM mode is that if a learned
                 sequence is immediately followed by a set of elements that
                 should be learned as a 2nd sequence, the first pamLength
                 elements of that sequence will not be learned as part of that
                 2nd sequence.
    maxAge:      Controls global decay. Global decay will only decay segments
                 that have not been activated for maxAge iterations, and will
                 only do the global decay loop every maxAge iterations.
                 Setting maxAge=1 reverts to the behavior where global decay
                 is applied every iteration to every segment. Using maxAge > 1
                 can significantly speed up the TP when global decay is used.
                 (NOTE: the constructor default here is 100000, not 1.)
    maxSeqLength: If not 0, we will never learn more than maxSeqLength inputs
                 in a row without starting over at start cells. This sets an
                 upper bound on the length of learned sequences and thus is
                 another means (besides maxAge and globalDecay) by which to
                 limit how much the TP tries to learn.
    maxSegmentsPerCell: The maximum number of segments allowed on a cell. This
                 is used to turn on "fixed size CLA" mode. When in effect,
                 globalDecay is not applicable and must be set to 0 and
                 maxAge must be set to 0. When this is used (> 0),
                 maxSynapsesPerSegment must also be > 0.
    maxSynapsesPerSegment: The maximum number of synapses allowed in a segment.
                 This is used to turn on "fixed size CLA" mode. When in effect,
                 globalDecay is not applicable and must be set to 0 and maxAge
                 must be set to 0. When this is used (> 0), maxSegmentsPerCell
                 must also be > 0.
    outputType:  Can be one of the following: 'normal', 'activeState',
                 'activeState1CellPerCol'.
                 'normal': output the OR of the active and predicted state
                 'activeState': output only the active state
                 'activeState1CellPerCol': output only the active state, and at
                 most 1 cell/column. If more than 1 cell is active in a column,
                 the one with the highest confidence is sent up.
    ...
    """
    ConsolePrinterMixin.__init__(self, verbosity)

    # Check arguments
    assert pamLength > 0, "This implementation must have pamLength > 0"

    # Fixed size CLA mode requires both limits set, and decay/aging disabled.
    if maxSegmentsPerCell != -1 or maxSynapsesPerSegment != -1:
        assert (maxSegmentsPerCell > 0 and maxSynapsesPerSegment > 0)
        assert (globalDecay == 0.0)
        assert (maxAge == 0)

    # Seed random number generator; a negative seed means "random seed".
    if seed >= 0:
        self._random = Random(seed)
    else:
        self._random = Random(numpy.random.randint(256))

    # Store creation parameters
    self.numberOfCols = numberOfCols
    self.cellsPerColumn = cellsPerColumn
    self._numberOfCells = numberOfCols * cellsPerColumn
    # Permanence-related parameters are stored as float32 to match the
    # NTA real type used by the state matrices.
    self.initialPerm = numpy.float32(initialPerm)
    self.connectedPerm = numpy.float32(connectedPerm)
    self.minThreshold = minThreshold
    self.newSynapseCount = newSynapseCount
    self.permanenceInc = numpy.float32(permanenceInc)
    self.permanenceDec = numpy.float32(permanenceDec)
    self.permanenceMax = numpy.float32(permanenceMax)
    self.globalDecay = numpy.float32(globalDecay)
    self.activationThreshold = activationThreshold
    self.doPooling = doPooling
    self.segUpdateValidDuration = segUpdateValidDuration
    self.burnIn = burnIn
    self.collectStats = collectStats
    self.seed = seed
    self.verbosity = verbosity
    self.pamLength = pamLength
    self.maxAge = maxAge
    self.maxInfBacktrack = maxInfBacktrack
    self.maxLrnBacktrack = maxLrnBacktrack
    self.maxSeqLength = maxSeqLength
    self.maxSegmentsPerCell = maxSegmentsPerCell
    self.maxSynapsesPerSegment = maxSynapsesPerSegment
    assert (outputType in ('normal', 'activeState', 'activeState1CellPerCol'))
    self.outputType = outputType

    # No point having larger expiration if we are not doing pooling
    if not doPooling:
        self.segUpdateValidDuration = 1

    # Create data structures
    self.activeColumns = [] # list of indices of active columns

    # Cells are indexed by column and index in the column.
    # Every self.cells[column][index] contains a list of segments.
    # Each segment is a structure of class Segment.
    self.cells = []
    for c in xrange(self.numberOfCols):
        self.cells.append([])
        for i in xrange(self.cellsPerColumn):
            self.cells[c].append([])

    self.lrnIterationIdx = 0
    self.iterationIdx = 0
    self.segID = 0 # unique segment id, so we can put segments in hashes
    self.currentOutput = None # for checkPrediction

    # pamCounter gets reset to pamLength whenever we detect that the learning
    # state is making good predictions (at least half the columns predicted).
    # Whenever we do not make a good prediction, we decrement pamCounter.
    # When pamCounter reaches 0, we start the learn state over again at start
    # cells.
    self.pamCounter = self.pamLength

    # Trivial prediction algorithms (optional baselines to compare against)
    if len(trivialPredictionMethods.strip()) > 0:
        self.trivialPredictor = TrivialPredictor(numberOfCols, verbosity,
                                                 trivialPredictionMethods)
    else:
        self.trivialPredictor = None

    # If True, the TP will compute a signature for each sequence
    self.collectSequenceStats = False

    # This gets set when we receive a reset and cleared on the first compute
    # following a reset.
    self.resetCalled = False

    # We keep track of the average input density here
    self.avgInputDensity = None

    # We keep track of the length of the sequence currently being learned as
    # well as a moving average of all learned sequence lengths here.
    self.learnedSeqLength = 0
    self.avgLearnedSeqLength = 0.0

    # All other members are ephemeral - don't need to be saved when we save
    # state. So they get separated out into _initEphemerals, which also
    # gets called when we are being restored from a saved state (via
    # __setstate__)
    self._initEphemerals()
################################################################################
def _getEphemeralMembers(self):
"""
List of our member variables that we don't need to be saved
"""
return [
'segmentUpdates',
'_internalStats',
'_stats',
'_prevInfPatterns',
'_prevLrnPatterns',
'lrnActiveState',
'lrnPredictedState',
'infActiveState',
'infPredictedState',
'cellConfidence',
'colConfidence'
]
#############################################################################
def _initEphemerals(self):
"""
Initialize all ephemeral members after being restored to a pickled state.
"""
# We store the lists of segments updates, per cell, so that they can be applied
# later during learning, when the cell gets bottom-up activation.
# We store one list per cell. The lists are identified with a hash key which
# is a tuple (column index, cell index).
self.segmentUpdates = {}
# Allocate and reset all stats
self.resetStats()
# NOTE: We don't use the same backtrack buffer for inference and learning
# because learning has a different metric for determining if an input from
# the past is potentially useful again for backtracking.
#
# Our inference backtrack buffer. This keeps track of up to
# maxInfBacktrack of previous input. Each entry is a list of active column
# inputs.
self._prevInfPatterns = []
# Our learning backtrack buffer. This keeps track of up to maxLrnBacktrack of
# previous input. Each entry is a list of active column inputs
self._prevLrnPatterns = []
# Keep integers rather than bools. Float?
stateShape = (self.numberOfCols, self.cellsPerColumn)
self.lrnActiveState = {}
self.lrnActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState = {}
self.lrnPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.lrnPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState = {}
self.infActiveState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infActiveState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState = {}
self.infPredictedState["t"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["t-1"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["backup"] = numpy.zeros(stateShape, dtype="int8")
self.infPredictedState["candidate"] = numpy.zeros(stateShape, dtype="int8")
self.cellConfidence = {}
self.cellConfidence["t"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["t-1"] = numpy.zeros(stateShape, dtype="float32")
self.cellConfidence["candidate"] = numpy.zeros(stateShape, dtype="float32")
self.colConfidence = {}
self.colConfidence["t"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["t-1"] = numpy.zeros(self.numberOfCols, dtype="float32")
self.colConfidence["candidate"] = numpy.zeros(self.numberOfCols, dtype="float32")
def getLearnActiveStateT(self):
return self.lrnActiveState['t']
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
# Make sure we access "cells" so we'll load it if needed...
_ = self.cells
state = self.__dict__.copy()
for ephemeralMemberName in self._getEphemeralMembers():
state.pop(ephemeralMemberName, None)
state['_random'] = pickle.dumps(state['_random']) # Must be done manually
return state
#############################################################################
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
self.__dict__.update(state)
self._random = pickle.loads(self._random) # Must be done manually
self._initEphemerals()
def __getattr__(self, name):
    """
    Patch __getattr__ so that we can catch the first access to 'cells' and load.
    This function is only called when we try to access an attribute that doesn't
    exist. We purposely make sure that "self.cells" doesn't exist after
    unpickling so that we'll hit this, then we can load it on the first access.
    If this is called at any other time, it will raise an AttributeError.
    That's because:
    - If 'name' is "cells", after the first call, self._realCells won't exist
      so we'll get an implicit AttributeError.
    - If 'name' isn't "cells", I'd expect our super wouldn't have __getattr__,
      so we'll raise our own Attribute error. If the super did get __getattr__,
      we'll just return what it gives us.
    """
    # NOTE(review): the error message says 'TP' rather than 'TP_R'; this
    # looks copied from the base TP implementation.  Left untouched because
    # callers/tests may match on the message text -- confirm before changing.
    try:
        return super(TP_R, self).__getattr__(name)
    except AttributeError:
        raise AttributeError("'TP' object has no attribute '%s'" % name)
def __del__(self):
    # Intentionally a no-op.  Subclasses (e.g. the C++-backed TP10X2) may
    # override to release native resources -- TODO confirm; nothing in this
    # file requires cleanup.
    pass
def saveToFile(self, filePath):
    """
    Save the TP state to the given file path.

    No-op in this pure-Python implementation; implemented in TP10X2.
    """
    pass
def loadFromFile(self, filePath):
    """
    Load the TP state from the given file path.

    No-op in this pure-Python implementation; implemented in TP10X2.
    """
    pass
def setRandomSeed(self, seed):
    """ Seed the random number generator.
    This is used during unit testing to generate repeatable results.
    """
    # Replaces the RNG wholesale rather than re-seeding in place.
    self._random = Random(seed)
def getRandomState(self):
    """ Return the random number generator's internal state.
    This is used during unit testing to generate repeatable results.
    """
    return self._random.getstate()
def setRandomState(self, state):
    """ Set the random number generator's internal state (as returned by
    getRandomState).  Used during unit testing to generate repeatable results.
    """
    self._random.setstate(state)
################################################################################
def reset(self,):
""" Reset the state of all cells.
This is normally used between sequences while training. All internal states
are reset to 0.
"""
if self.verbosity >= 3:
print "\n==== RESET ====="
self.lrnActiveState['t-1'].fill(0)
self.lrnActiveState['t'].fill(0)
self.lrnPredictedState['t-1'].fill(0)
self.lrnPredictedState['t'].fill(0)
self.infActiveState['t-1'].fill(0)
self.infActiveState['t'].fill(0)
self.infPredictedState['t-1'].fill(0)
self.infPredictedState['t'].fill(0)
self.cellConfidence['t-1'].fill(0)
self.cellConfidence['t'].fill(0)
# Flush the segment update queue
self.segmentUpdates = {}
self._internalStats['nInfersSinceReset'] = 0
#To be removed
self._internalStats['curPredictionScore'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
if self.trivialPredictor is not None:
self.trivialPredictor.reset()
# When a reset occurs, set prevSequenceSignature to the signature of the
# just-completed sequence and start accumulating histogram for the next
# sequence.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
if self._internalStats['confHistogram'].sum() > 0:
sig = self._internalStats['confHistogram'].copy()
sig.reshape(self.numberOfCols * self.cellsPerColumn)
self._internalStats['prevSequenceSignature'] = sig
self._internalStats['confHistogram'].fill(0)
self.resetCalled = True
# Clear out input history
self._prevInfPatterns = []
self._prevLrnPatterns = []
################################################################################
def resetStats(self):
""" Reset the learning and inference stats. This will usually be called by
user code at the start of each inference run (for a particular data set).
"""
self._stats = dict()
self._internalStats = dict()
self._internalStats['nInfersSinceReset'] = 0
self._internalStats['nPredictions'] = 0
#New prediction score
self._internalStats['curPredictionScore2'] = 0
self._internalStats['predictionScoreTotal2'] = 0
self._internalStats['curFalseNegativeScore'] = 0
self._internalStats['falseNegativeScoreTotal'] = 0
self._internalStats['curFalsePositiveScore'] = 0
self._internalStats['falsePositiveScoreTotal'] = 0
self._internalStats['pctExtraTotal'] = 0
self._internalStats['pctMissingTotal'] = 0
self._internalStats['curMissing'] = 0
self._internalStats['curExtra'] = 0
self._internalStats['totalMissing'] = 0
self._internalStats['totalExtra'] = 0
# Sequence signature statistics. Note that we don't reset the sequence
# signature list itself.
self._internalStats['prevSequenceSignature'] = None
if self.collectSequenceStats:
self._internalStats['confHistogram'] = \
numpy.zeros((self.numberOfCols, self.cellsPerColumn), dtype="float32")
if self.trivialPredictor is not None:
self.trivialPredictor.resetStats()
################################################################################
def getStats(self):
""" Return the current learning and inference stats. This returns a dict
containing all the learning and inference stats we have collected since the
last resetStats(). If collectStats is False, then None is returned.
The following keys are returned in the dict when collectStats is True:
nPredictions: the number of predictions. This is the total number
of inferences excluding burn-in and the last
inference.
curPredictionScore: the score for predicting the current input (predicted
during the previous inference)
curMissing: the number of bits in the current input that were
not predicted to be on.
curExtra: the number of bits in the predicted output that are
not in the next input
predictionScoreTotal: the sum of every prediction score to date
predictionScoreAvg: predictionScoreTotal / nPredictions
pctMissingTotal: the total number of bits that were missed over all
predictions
pctMissingAvg: pctMissingTotal / nPredictions
prevSequenceSignature: signature for the sequence immediately preceding the
last reset. 'None' if collectSequenceStats is False
"""
if not self.collectStats:
return None
self._stats['nPredictions'] = self._internalStats['nPredictions']
self._stats['curMissing'] = self._internalStats['curMissing']
self._stats['curExtra'] = self._internalStats['curExtra']
self._stats['totalMissing'] = self._internalStats['totalMissing']
self._stats['totalExtra'] = self._internalStats['totalExtra']
nPredictions = max(1, self._stats['nPredictions'])
#New prediction score
self._stats['curPredictionScore2'] = self._internalStats['curPredictionScore2']
self._stats['predictionScoreAvg2'] = self._internalStats['predictionScoreTotal2'] \
/ nPredictions
self._stats['curFalseNegativeScore'] = self._internalStats['curFalseNegativeScore']
self._stats['falseNegativeAvg'] = self._internalStats['falseNegativeScoreTotal'] \
/ nPredictions
self._stats['curFalsePositiveScore'] = self._internalStats['curFalsePositiveScore']
self._stats['falsePositiveAvg'] = self._internalStats['falsePositiveScoreTotal'] \
/ nPredictions
self._stats['pctExtraAvg'] = self._internalStats['pctExtraTotal'] \
/ nPredictions
self._stats['pctMissingAvg'] = self._internalStats['pctMissingTotal'] \
/ nPredictions
# This will be None if collectSequenceStats is False
self._stats['prevSequenceSignature'] = self._internalStats['prevSequenceSignature']
bestScore = -1.0
bestMethod = "none"
if self.trivialPredictor is not None:
for m in self.trivialPredictor.methods:
key = "tr_%s" % m
score = self.trivialPredictor._internalStats[m]['predictionScoreTotal2'] \
/ nPredictions
if score > bestScore:
bestScore = score
bestMethod = m
self._stats[key] = score
key = "vs_%s" % m
self._stats[key] = self._stats['predictionScoreAvg2'] - score
self._stats["vs_all"] = self._stats['predictionScoreAvg2'] - bestScore
self._stats["tr_best"] = bestMethod
return self._stats
def _updateStatsInferEnd(self, stats, bottomUpNZ, predictedState,
                         colConfidence):
    """ Called at the end of learning and inference; updates a number of
    stats in our _internalStats dictionary, including the computed
    prediction score.

    Parameters:
    ------------------------------------------------------------------
    stats:          internal stats dictionary
    bottomUpNZ:     list of the active bottom-up inputs
    predictedState: the columns we predicted on the last time step (should
                    match the current bottomUpNZ in the best case)
    colConfidence:  column confidences we determined on the last time step
    """
    # Return if not collecting stats
    if not self.collectStats:
        return
    stats['nInfersSinceReset'] += 1

    # Score how well the prediction from the last time step matched the
    # current bottom-up input.
    # (FIX: removed the unused 'patternsToCheck' local from the original.)
    (numExtra2, numMissing2, confidences2) = self.checkPrediction2(
        patternNZs=[bottomUpNZ],
        output=predictedState,
        colConfidence=colConfidence)
    predictionScore, positivePredictionScore, negativePredictionScore = \
        confidences2[0]

    # Store the stats that don't depend on burn-in
    stats['curPredictionScore2'] = float(predictionScore)
    stats['curFalseNegativeScore'] = 1.0 - float(positivePredictionScore)
    stats['curFalsePositiveScore'] = float(negativePredictionScore)
    stats['curMissing'] = numMissing2
    stats['curExtra'] = numExtra2

    # If we are past the burn-in period, update the accumulated stats.
    # Here's what various burn-in values mean:
    #   0: try to predict the first element of each sequence and all subsequent
    #   1: try to predict the second element of each sequence and all subsequent
    #   etc.
    if stats['nInfersSinceReset'] <= self.burnIn:
        return

    # Burn-in related stats
    stats['nPredictions'] += 1
    numExpected = max(1.0, float(len(bottomUpNZ)))
    stats['totalMissing'] += numMissing2
    stats['totalExtra'] += numExtra2
    stats['pctExtraTotal'] += 100.0 * numExtra2 / numExpected
    stats['pctMissingTotal'] += 100.0 * numMissing2 / numExpected
    stats['predictionScoreTotal2'] += float(predictionScore)
    stats['falseNegativeScoreTotal'] += 1.0 - float(positivePredictionScore)
    stats['falsePositiveScoreTotal'] += float(negativePredictionScore)

    if self.collectSequenceStats:
        # Collect cell confidences for every cell that correctly predicted
        # the current bottom-up input, normalized per column.
        cc = self.cellConfidence['t-1'] * self.infActiveState['t']
        sconf = cc.sum(axis=1)
        for c in range(self.numberOfCols):
            if sconf[c] > 0:
                cc[c, :] /= sconf[c]

        # Update cell confidence histogram: add column-normalized confidence
        # scores to the histogram
        self._internalStats['confHistogram'] += cc
################################################################################
# The following print functions for debugging.
################################################################################
def printState(self, aState):
"""Print an integer array that is the same shape as activeState."""
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c,i])
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatRow(aState,i)
##########################################################################
def printConfidence(self, aState, maxCols = 20):
"""Print a floating point array that is the same shape as activeState."""
def formatFPRow(var, i):
s = ''
for c in range(min(maxCols,self.numberOfCols)):
if c > 0 and c % 10 == 0:
s += ' '
s += ' %5.3f'%var[c,i]
s += ' '
return s
for i in xrange(self.cellsPerColumn):
print formatFPRow(aState,i)
##########################################################################
def printColConfidence(self, aState, maxCols = 20):
"""Print up to maxCols number from a flat floating point array."""
def formatFPRow(var):
s = ''
for c in range(min(maxCols,self.numberOfCols)):
if c > 0 and c % 10 == 0:
s += ' '
s += ' %5.3f'%var[c]
s += ' '
return s
print formatFPRow(aState)
##########################################################################
def printStates(self, printPrevious = True, printLearnState = True):
nSpaces = 2 * self.numberOfCols - 3
def formatRow(var, i):
s = ''
for c in range(self.numberOfCols):
if c > 0 and c % 10 == 0:
s += ' '
s += str(var[c,i])
s += ' '
return s
print "\nInference Active state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.infActiveState['t-1'], i),
print formatRow(self.infActiveState['t'],i)
print "Inference Predicted state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.infPredictedState['t-1'], i),
print formatRow(self.infPredictedState['t'],i)
if printLearnState:
print "\nLearn Active state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.lrnActiveState['t-1'], i),
print formatRow(self.lrnActiveState['t'],i)
print "Learn Predicted state"
for i in xrange(self.cellsPerColumn):
if printPrevious:
print formatRow(self.lrnPredictedState['t-1'], i),
print formatRow(self.lrnPredictedState['t'],i)
##########################################################################
def printOutput(self, y):
print "Output"
for i in xrange(self.cellsPerColumn):
for c in xrange(self.numberOfCols):
print int(y[c,i]),
print
##########################################################################
def printInput(self, x):
print "Input"
for c in xrange(self.numberOfCols):
print int(x[c]),
print
##########################################################################
def printParameters(self):
"""Print the parameter settings for the TP."""
print "numberOfCols=",self.numberOfCols
print "cellsPerColumn=",self.cellsPerColumn
print "minThreshold=",self.minThreshold
print "newSynapseCount=",self.newSynapseCount
print "activationThreshold=",self.activationThreshold
print
print "initialPerm=",self.initialPerm
print "connectedPerm=",self.connectedPerm
print "permanenceInc=", self.permanenceInc
print "permanenceDec=", self.permanenceDec
print "permanenceMax=", self.permanenceMax
print "globalDecay=", self.globalDecay
print
print "doPooling=",self.doPooling
print "segUpdateValidDuration=",self.segUpdateValidDuration
print "pamLength=", self.pamLength
def printActiveIndices(self, state, andValues=False):
    """Print the list of [column, cellIdx] indices for each of the active
    cells in state.  If andValues is True, the value at each index is
    printed alongside it.  Accepts either a 2-D (col, cell) array or a flat
    per-column array (cellIdx is then reported as 0).
    """
    if len(state.shape) == 2:
        (cols, cellIdxs) = state.nonzero()
    else:
        # Flat array: treat every nonzero column as cell index 0.
        cols = state.nonzero()[0]
        cellIdxs = numpy.zeros(len(cols))

    if len(cols) == 0:
        print "NONE"
        return

    # Group consecutive entries by column; numpy.nonzero returns indices in
    # row-major order, so all entries for a column are adjacent.
    prevCol = -1
    for (col,cellIdx) in zip(cols, cellIdxs):
        if col != prevCol:
            if prevCol != -1:
                print "] ",   # close the previous column's bracket
            print "Col %d: [" % (col),
            prevCol = col

        if andValues:
            if len(state.shape) == 2:
                value = state[col, cellIdx]
            else:
                value = state[col]
            print "%d: %s," % (cellIdx, value),
        else:
            print "%d," % (cellIdx),
    print "]"
def printComputeEnd(self, output, learn=False):
    """ Called at the end of inference to print out various diagnostic
    information based on the current verbosity level.

    output: flat output vector (length numberOfCols * cellsPerColumn)
    learn:  unused here -- presumably kept for interface compatibility
            with subclasses; TODO confirm.
    """
    if self.verbosity >= 3:
        print "----- computeEnd summary: "
        # A column is "bursting" when every cell in it is active, so the
        # per-column min of the active state is 1 exactly for bursting cols.
        print "numBurstingCols: %s, " % (self.infActiveState['t'].min(axis=1).sum()),
        print "curPredScore2: %s, " % (self._internalStats['curPredictionScore2']),
        print "curFalsePosScore: %s, " % (self._internalStats['curFalsePositiveScore']),
        print "1-curFalseNegScore: %s, " % (1-self._internalStats['curFalseNegativeScore'])
        print "numSegments: ", self.getNumSegments(),
        print "avgLearnedSeqLength: ", self.avgLearnedSeqLength

        print "----- infActiveState (%d on) ------" \
            % (self.infActiveState['t'].sum())
        self.printActiveIndices(self.infActiveState['t'])
        if self.verbosity >= 6:
            self.printState(self.infActiveState['t'])

        print "----- infPredictedState (%d on)-----" \
            % (self.infPredictedState['t'].sum())
        self.printActiveIndices(self.infPredictedState['t'])
        if self.verbosity >= 6:
            self.printState(self.infPredictedState['t'])

        print "----- lrnActiveState (%d on) ------" \
            % (self.lrnActiveState['t'].sum())
        self.printActiveIndices(self.lrnActiveState['t'])
        if self.verbosity >= 6:
            self.printState(self.lrnActiveState['t'])

        print "----- lrnPredictedState (%d on)-----" \
            % (self.lrnPredictedState['t'].sum())
        self.printActiveIndices(self.lrnPredictedState['t'])
        if self.verbosity >= 6:
            self.printState(self.lrnPredictedState['t'])

        print "----- cellConfidence -----"
        self.printActiveIndices(self.cellConfidence['t'], andValues=True)
        if self.verbosity >= 6:
            self.printConfidence(self.cellConfidence['t'])

        print "----- colConfidence -----"
        self.printActiveIndices(self.colConfidence['t'], andValues=True)

        # Confidence each currently-active cell had on the previous step.
        print "----- cellConfidence[t-1] for currently active cells -----"
        cc = self.cellConfidence['t-1'] * self.infActiveState['t']
        self.printActiveIndices(cc, andValues=True)

        if self.verbosity == 4:
            print "Cells, predicted segments only:"
            self.printCells(predictedOnly=True)
        elif self.verbosity >= 5:
            print "Cells, all segments:"
            self.printCells(predictedOnly=False)
        print

    elif self.verbosity >= 1:
        print "TP: active outputs(%d):" % len(output.nonzero()[0]),
        self.printActiveIndices(output.reshape(self.numberOfCols,
                                               self.cellsPerColumn))
################################################################################
def printSegmentUpdates(self):
print "=== SEGMENT UPDATES ===, Num = ",len(self.segmentUpdates)
for key, updateList in self.segmentUpdates.iteritems():
c,i = key[0], key[1]
print c,i,updateList
################################################################################
def printCell(self, c, i, onlyActiveSegments=False):
if len(self.cells[c][i]) > 0:
print "Column", c, "Cell", i, ":",
print len(self.cells[c][i]), "segment(s)"
for j,s in enumerate(self.cells[c][i]):
isActive = self.isSegmentActive(s, self.infActiveState['t'])
if not onlyActiveSegments or isActive:
isActiveStr = "*" if isActive else " "
print " %sSeg #%-3d" % (isActiveStr, j),
s.debugPrint()
################################################################################
def printCells(self, predictedOnly=False):
if predictedOnly:
print "--- PREDICTED CELLS ---"
else:
print "--- ALL CELLS ---"
print "Activation threshold=", self.activationThreshold,
print "min threshold=", self.minThreshold,
print "connected perm=", self.connectedPerm
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
if not predictedOnly or self.infPredictedState['t'][c,i]:
self.printCell(c, i, predictedOnly)
#############################################################################
def getNumSegmentsInCell(self, c, i):
""" Return the total number of synapses in cell (c,i)
"""
return len(self.cells[c][i])
#############################################################################
def getNumSynapses(self):
""" Return the total number of synapses
"""
nSyns = self.getSegmentInfo()[1]
return nSyns
#############################################################################
def getNumStrongSynapses(self):
""" Return the total number of strong synapses
"""
#todo: implement this, it is used by the node's getParameter() call
return 0
#############################################################################
def getNumStrongSynapsesPerTimeSlot(self):
""" Return the total number of strong synapses per time slot
"""
#todo: implement this, it is used by the node's getParameter() call
return 0
#############################################################################
def getNumSynapsesPerSegmentMax(self):
""" Return the max # of synapses seen in any one segment
"""
#todo: implement this, it is used by the node's getParameter() call
return 0
#############################################################################
def getNumSynapsesPerSegmentAvg(self):
""" Return the average number of synapses per segment
"""
return float(self.getNumSynapses()) / max(1, self.getNumSegments())
#############################################################################
def getNumSegments(self):
""" Return the total number of segments
"""
nSegs = self.getSegmentInfo()[0]
return nSegs
#############################################################################
def getNumCells(self):
""" Return the total number of cells
"""
return self.numberOfCols * self.cellsPerColumn
################################################################################
def getSegmentOnCell(self, c, i, segIdx):
"""Return the segment on cell (c,i) with index sidx.
Returns the segment as following list:
[ [segmentID, sequenceSegmentFlag, positive activations,
total activations, last active iteration,
lastPosDutyCycle, lastPosDutyCycleIteration],
[col1, idx1, perm1],
[col2, idx2, perm2], ...
]
"""
seg = self.cells[c][i][segIdx]
retlist = [[seg.segID, seg.isSequenceSeg, seg.positiveActivations,
seg.totalActivations, seg.lastActiveIteration,
seg._lastPosDutyCycle, seg._lastPosDutyCycleIteration]]
retlist += seg.syns
return retlist
#############################################################################
class SegmentUpdate():
"""
Class used to carry instructions for updating a segment.
"""
def __init__(self, c, i, seg=None, activeSynapses=[]):
self.columnIdx = c
self.cellIdx = i
self.segment = seg # The segment object itself, not an index (can be None)
self.activeSynapses = activeSynapses
self.sequenceSegment = False
self.phase1Flag = False
# Set true if segment only reaches activationThreshold when including
# not fully connected synapses.
self.weaklyPredicting = False
# Just for debugging
def __str__(self):
return "Seg update: cell=[%d,%d]" % (self.columnIdx, self.cellIdx) \
+ ", seq seg=" + str(self.sequenceSegment) \
+ ", seg=" + str(self.segment) \
+ ", synapses=" + str(self.activeSynapses)
################################################################################
def addToSegmentUpdates(self, c, i, segUpdate):
"""
Store a dated potential segment update. The "date" (iteration index) is used
later to determine whether the update is too old and should be forgotten.
This is controlled by parameter segUpdateValidDuration.
"""
# Sometimes we might be passed an empty update
if segUpdate is None or len(segUpdate.activeSynapses) == 0:
return
key = (c,i) # key = (column index, cell index in column)
# todo: scan list of updates for that cell and consolidate?
# But watch out for dates!
if self.segmentUpdates.has_key(key):
self.segmentUpdates[key] += [(self.lrnIterationIdx, segUpdate)]
else:
self.segmentUpdates[key] = [(self.lrnIterationIdx, segUpdate)]
################################################################################
def removeSegmentUpdate(self, updateInfo):
"""Remove a segment update (called when seg update expires or is processed)
Parameters:
--------------------------------------------------------------
updateInfo: (creationDate, SegmentUpdate)
"""
# An updateInfo contains (creationDate, SegmentUpdate)
(creationDate, segUpdate) = updateInfo
# Key is stored in segUpdate itself...
key = (segUpdate.columnIdx, segUpdate.cellIdx)
self.segmentUpdates[key].remove(updateInfo)
#############################################################################
def computeOutput(self):
"""Computes output for both learning and inference. In both cases, the
output is the boolean OR of activeState and predictedState at t.
Stores currentOutput for checkPrediction."""
# todo: This operation can be sped up by:
# 1.) Pre-allocating space for the currentOutput
# 2.) Making predictedState and activeState of type 'float32' up front
# 3.) Using logical_or(self.predictedState['t'], self.activeState['t'],
# self.currentOutput)
#
if self.outputType == 'activeState1CellPerCol':
# Fire only the most confident cell in columns that have 2 or more
# active cells
mostActiveCellPerCol = self.cellConfidence['t'].argmax(axis=1)
self.currentOutput = numpy.zeros(self.infActiveState['t'].shape,
dtype='float32')
# Turn on the most confident cell in each column. Note here that
# Columns refers to TP columns, even though each TP column is a row
# in the numpy array.
numCols = self.currentOutput.shape[0]
self.currentOutput[(xrange(numCols), mostActiveCellPerCol)] = 1
# Don't turn on anything in columns which are not active at all
activeCols = self.infActiveState['t'].max(axis=1)
inactiveCols = numpy.where(activeCols==0)[0]
self.currentOutput[inactiveCols,:] = 0
elif self.outputType == 'activeState':
self.currentOutput = self.infActiveState['t']
elif self.outputType == 'normal':
self.currentOutput = \
logical_or(self.infPredictedState['t'], self.infActiveState['t'])
else:
raise RuntimeError("Unimplemented outputType")
return self.currentOutput.reshape(-1).astype('float32')
#############################################################################
def getActiveState(self):
""" Return the current active state. This is called by the node to
obtain the sequence output of the TP.
"""
# todo: This operation can be sped up by making activeState of
# type 'float32' up front.
return self.infActiveState['t'].reshape(-1).astype('float32')
#############################################################################
  def predict(self, nSteps):
    """
    This function gives the future predictions for <nSteps> timesteps starting
    from the current TP state. The TP is returned to its original state at the
    end before returning.
    1) We save the TP state.
    2) Loop for nSteps
          a) Turn-on with lateral support from the current active cells
          b) Set the predicted cells as the next step's active cells. This step
             in learn and infer methods use input here to correct the predictions.
             We don't use any input here.
    3) Revert back the TP state to the time before prediction
    Parameters:
    --------------------------------------------
    nSteps:     The number of future time steps to be predicted
    retval:     all the future predictions - a numpy array of type "float32" and
                shape (nSteps, numberOfCols).
                The ith row gives the tp prediction for each column at
                a future timestep (t+i+1).
    """
    # Save the TP dynamic state, we will use to revert back in the end
    pristineTPDynamicState = self._getTPDynamicState()
    assert (nSteps>0)
    # multiStepColumnPredictions holds all the future prediction.
    multiStepColumnPredictions = numpy.zeros((nSteps, self.numberOfCols),
                                             dtype="float32")
    # This is a (nSteps-1)+half loop. Phase 2 in both learn and infer methods
    # already predicts for timestep (t+1). We use that prediction for free and save
    # the half-a-loop of work.
    step = 0
    while True:
      # We get the prediction for the columns in the next time step from
      # the topDownCompute method. It internally uses confidences.
      multiStepColumnPredictions[step,:] = self.topDownCompute()
      # Cleanest way in python to handle one and half loops
      if step == nSteps-1:
        break
      step += 1
      # Copy t-1 into t
      self.infActiveState['t-1'][:,:] = self.infActiveState['t'][:,:]
      self.infPredictedState['t-1'][:,:] = self.infPredictedState['t'][:,:]
      self.cellConfidence['t-1'][:,:] = self.cellConfidence['t'][:,:]
      # Predicted state at "t-1" becomes the active state at "t"
      self.infActiveState['t'][:,:] = self.infPredictedState['t-1'][:,:]
      # Predicted state and confidence are set in phase2.
      self.infPredictedState['t'].fill(0)
      self.cellConfidence['t'].fill(0.0)
      self.inferPhase2()
    # Revert the dynamic state to the saved state so the caller sees no
    # side effects from the simulated look-ahead
    self._setTPDynamicState(pristineTPDynamicState)
    return multiStepColumnPredictions
#############################################################################
def _getTPDynamicStateVariableNames(self,):
"""
Any newly added dynamic states in the TP should be added to this list.
Parameters:
--------------------------------------------
retval: The list of names of TP dynamic state variables.
"""
return ["infActiveState",
"infPredictedState",
"lrnActiveState",
"lrnPredictedState",
"cellConfidence",
"colConfidence",
]
#############################################################################
def _getTPDynamicState(self,):
"""
Parameters:
--------------------------------------------
retval: A dict with all the dynamic state variable names as keys and
their values at this instant as values.
"""
tpDynamicState = dict()
for variableName in self._getTPDynamicStateVariableNames():
tpDynamicState[variableName] = copy.deepcopy(self.__dict__[variableName])
return tpDynamicState
#############################################################################
def _setTPDynamicState(self, tpDynamicState):
"""
Set all the dynamic state variables from the <tpDynamicState> dict.
<tpDynamicState> dict has all the dynamic state variable names as keys and
their values at this instant as values.
We set the dynamic state variables in the tp object with these items.
"""
for variableName in self._getTPDynamicStateVariableNames():
self.__dict__[variableName] = tpDynamicState.pop(variableName)
################################################################################
def _updateAvgLearnedSeqLength(self, prevSeqLength):
""" Update our moving average of learned sequence length.
"""
if self.lrnIterationIdx < 100:
alpha = 0.5
else:
alpha = 0.1
#print "before", self.avgLearnedSeqLength, "inc. ", prevSeqLength,
self.avgLearnedSeqLength = (1.0 - alpha) * self.avgLearnedSeqLength \
+ (alpha * prevSeqLength)
#print "after", self.avgLearnedSeqLength
################################################################################
def getAvgLearnedSeqLength(self):
""" Return our moving average of learned sequence length.
"""
return self.avgLearnedSeqLength
#############################################################################
  def inferBacktrack(self, activeColumns):
    """ This "backtracks" our inference state, trying to see if we can lock onto
    the current set of inputs by assuming the sequence started up to N steps
    ago on start cells.
    This will adjust infActiveState['t'] if it does manage to lock on to a
    sequence that started earlier. It will also compute infPredictedState['t']
    based on the possibly updated infActiveState['t'], so there is no need to
    call inferPhase2() after calling inferBacktrack().
    This looks at:
        - infActiveState['t']
    This updates/modifies:
        - infActiveState['t']
        - infPredictedState['t']
        - colConfidence['t']
        - cellConfidence['t']
    Parameters:
    --------------------------------------------------------------------
    activeColumns: The list of active column indices
    How it works:
    -------------------------------------------------------------------
    This method gets called from updateInferenceState when we detect either of the
    following two conditions:
    1.) The current bottom-up input had too many un-expected columns
    2.) We fail to generate a sufficient number of predicted columns for the
        next time step.
    Either of these two conditions indicate that we have fallen out of a
    learned sequence.
    Rather than simply "giving up" and bursting on the unexpected input
    columns, a better approach is to see if perhaps we are in a sequence that
    started a few steps ago. The real world analogy is that you are driving
    along and suddenly hit a dead-end, you will typically go back a few turns
    ago and pick up again from a familiar intersection.
    This back-tracking goes hand in hand with our learning methodology, which always
    tries to learn again from start cells after it loses context. This results
    in a network that has learned multiple, overlapping paths through the input
    data, each starting at different points. The lower the global decay and the
    more repeatability in the data, the longer each of these paths will end up
    being.
    The goal of this function is to find out which starting point in the past leads
    to the current input with the most context as possible. This gives us the
    best chance of predicting accurately going forward. Consider the following
    example, where you have learned the following sub-sequences which have the
    given frequencies:
                  ? - Q - C - D - E      10X      seq 0
                  ? - B - C - D - F      1X       seq 1
                  ? - B - C - H - I      2X       seq 2
                  ? - B - C - D - F      3X       seq 3
          ? - Z - A - B - C - D - J      2X       seq 4
          ? - Z - A - B - C - H - I      1X       seq 5
          ? - Y - A - B - C - D - F      3X       seq 6
      ----------------------------------------
        W - X - Z - A - B - C - D        <= input history
                                  ^
                                  current time step
    Suppose, in the current time step, the input pattern is D and you have not
    predicted D, so you need to backtrack. Suppose we can backtrack up to 6 steps
    in the past, which path should we choose? From the table above, we can see
    that the correct answer is to assume we are in seq 1. How do we implement the
    backtrack to give us this right answer? The current implementation takes the
    following approach:
    1.) Start from the farthest point in the past.
    2.) For each starting point S, calculate the confidence of the current
        input, conf(startingPoint=S), assuming we followed that sequence.
        Note that we must have learned at least one sequence that starts at
        point S.
    3.) If conf(startingPoint=S) is significantly different from
        conf(startingPoint=S-1), then choose S-1 as the starting point.
        The assumption here is that starting point S-1 is the starting point of
        a learned sub-sequence that includes the current input in its path and
        that started the longest ago. It thus has the most context and will be
        the best predictor going forward.
    From the statistics in the above table, we can compute what the confidences
    will be for each possible starting point:
      startingPoint           confidence of D
      -----------------------------------------
      B (t-2)               4/6  = 0.667   (seq 1,3)/(seq 1,2,3)
      Z (t-4)               2/3  = 0.667   (seq 4)/(seq 4,5)
    First of all, we do not compute any confidences at starting points t-1, t-3,
    t-5, t-6 because there are no learned sequences that start at those points.
    Notice here that Z is the starting point of the longest sub-sequence leading
    up to the current input. Even though starting at t-2 and starting at t-4
    give the same confidence value, we choose the sequence starting at t-4
    because it gives the most context, and it mirrors the way that learning
    extends sequences.
    """
    # How much input history have we accumulated?
    # The current input is always at the end of self._prevInfPatterns (at
    # index -1), but it is also evaluated as a potential starting point by
    # turning on its start cells and seeing if it generates sufficient
    # predictions going forward.
    numPrevPatterns = len(self._prevInfPatterns)
    if numPrevPatterns <= 0:
      return
    # This is an easy to use label for the current time step
    currentTimeStepsOffset = numPrevPatterns - 1
    # Save our current active state in case we fail to find a place to restart
    # todo: save infActiveState['t-1'], infPredictedState['t-1']?
    self.infActiveState['backup'][:,:] = self.infActiveState['t'][:,:]
    # Save our t-1 predicted state because we will write over it as we evaluate
    # each potential starting point.
    self.infPredictedState['backup'][:,:] = self.infPredictedState['t-1'][:,:]
    # We will record which previous input patterns did not generate predictions
    # up to the current time step and remove all the ones at the head of the
    # input history queue so that we don't waste time evaluating them again at
    # a later time step.
    badPatterns = []
    # ====================================================================
    # Let's go back in time and replay the recent inputs from start cells and
    # see if we can lock onto this current set of inputs that way.
    #
    # Start the farthest back and work our way forward. For each starting point,
    # See if firing on start cells at that point would predict the current
    # input as well as generate sufficient predictions for the next time step.
    #
    # We want to pick the point closest to the current time step that gives us
    # the relevant confidence. Think of this example, where we are at D and need to
    #     A - B - C - D
    # decide if we should backtrack to C, B, or A. Suppose B-C-D is a high order
    # sequence and A is unrelated to it. If we backtrack to B we would get a
    # certain confidence of D, but if we went farther back, to A, the confidence
    # wouldn't change, since A has no impact on the B-C-D series.
    #
    # So, our strategy will be to pick the "B" point, since choosing the A point
    # does not impact our confidences going forward at all.
    inSequence = False
    candConfidence = None
    candStartOffset = None
    for startOffset in range(0, numPrevPatterns):
      # If we have a candidate already in the past, don't bother falling back
      # to start cells on the current input.
      if startOffset == currentTimeStepsOffset and candConfidence is not None:
        break
      if self.verbosity >= 3:
        print "Trying to lock-on using startCell state from %d steps ago:" \
              % (numPrevPatterns - 1 - startOffset), \
              self._prevInfPatterns[startOffset]
      # Play through starting from starting point 'startOffset'
      inSequence = False
      for offset in range(startOffset, numPrevPatterns):
        # If we are about to set the active columns for the current time step
        # based on what we predicted, capture and save the total confidence of
        # predicting the current input
        if offset == currentTimeStepsOffset:
          totalConfidence = self.colConfidence['t'][activeColumns].sum()
        # Compute activeState[t] given bottom-up and predictedState[t-1]
        self.infPredictedState['t-1'][:,:] = self.infPredictedState['t'][:,:]
        inSequence = self.inferPhase1(self._prevInfPatterns[offset],
                            useStartCells = (offset == startOffset))
        if not inSequence:
          break
        # Compute predictedState['t'] given activeState['t']
        if self.verbosity >= 3:
          print "  backtrack: computing predictions from ", \
                      self._prevInfPatterns[offset]
        inSequence = self.inferPhase2()
        if not inSequence:
          break
      # If starting from startOffset got lost along the way, mark it as an
      # invalid start point.
      if not inSequence:
        badPatterns.append(startOffset)
        continue
      # If we got to here, startOffset is a candidate starting point.
      # Save this state as a candidate state. It will become the chosen state if
      # we detect a change in confidences starting at a later startOffset
      candConfidence = totalConfidence
      candStartOffset = startOffset
      if self.verbosity >= 3 and startOffset != currentTimeStepsOffset:
        print "  # Prediction confidence of current input after starting %d " \
              "steps ago:" % (numPrevPatterns - 1 - startOffset), totalConfidence
      if candStartOffset == currentTimeStepsOffset: # no more to try
        break
      self.infActiveState['candidate'][:,:] = self.infActiveState['t'][:,:]
      self.infPredictedState['candidate'][:,:] = self.infPredictedState['t'][:,:]
      self.cellConfidence['candidate'][:,:] = self.cellConfidence['t'][:,:]
      self.colConfidence['candidate'][:] = self.colConfidence['t'][:]
      break
    # =======================================================================
    # If we failed to lock on at any starting point, fall back to the original
    # active state that we had on entry
    if candStartOffset is None:
      if self.verbosity >= 3:
        print "Failed to lock on. Falling back to bursting all unpredicted."
      self.infActiveState['t'][:,:] = self.infActiveState['backup'][:,:]
      self.inferPhase2()
    else:
      if self.verbosity >= 3:
        print "Locked on to current input by using start cells from %d " \
              " steps ago:" % (numPrevPatterns - 1 - candStartOffset), \
              self._prevInfPatterns[candStartOffset]
      # Install the candidate state, if it wasn't the last one we evaluated.
      if candStartOffset != currentTimeStepsOffset:
        self.infActiveState['t'][:,:] = self.infActiveState['candidate'][:,:]
        self.infPredictedState['t'][:,:] = self.infPredictedState['candidate'][:,:]
        self.cellConfidence['t'][:,:] = self.cellConfidence['candidate'][:,:]
        self.colConfidence['t'][:] = self.colConfidence['candidate'][:]
    # =======================================================================
    # Remove any useless patterns at the head of the previous input pattern
    # queue.
    for i in range(numPrevPatterns):
      if i in badPatterns or (candStartOffset is not None and i <= candStartOffset):
        if self.verbosity >= 3:
          print "Removing useless pattern from history:", self._prevInfPatterns[0]
        self._prevInfPatterns.pop(0)
      else:
        break
    # Restore the original predicted state.
    self.infPredictedState['t-1'][:,:] = self.infPredictedState['backup'][:,:]
##########################################################################
def inferPhase1(self, activeColumns):
""" Update the inference active state from the last set of predictions
and the current bottom-up.
This looks at:
- infPredictedState['t-1']
This modifies:
- infActiveState['t']
Parameters:
------------------------------------------------------------------
activeColumns: list of active bottom-ups
useStartCells: If true, ignore previous predictions and simply turn on
the start cells in the active columns
retval: True if the current input was sufficiently predicted, OR
if we started over on startCells.
False indicates that the current input was NOT predicted,
and we are now bursting on most columns.
"""
# =========================================================================
# Init to zeros to start
self.infActiveState['t'].fill(0)
# =========================================================================
# Phase 1 - turn on predicted cells in each column receiving bottom-up
# If we are following a reset, activate only the start cell in each
# column that has bottom-up
numPredictedColumns = 0
for c in activeColumns:
predictingCells = numpy.where(self.infPredictedState['t-1'][c] == 1)[0]
numPredictingCells = len(predictingCells)
if numPredictingCells > 0:
self.infActiveState['t'][c, predictingCells] = 1
numPredictedColumns += 1
else:
self.infActiveState['t'][c,:] = 1 # whole column bursts
##########################################################################
def inferPhase2(self):
""" Phase 2 for the inference state. The computes the predicted state, then
checks to insure that the predicted state is not over-saturated, i.e.
look too close like a burst. This indicates that there were so many
separate paths learned from the current input columns to the predicted
input columns that bursting on the current input columns is most likely
generated mix and match errors on cells in the predicted columns. If
we detect this situation, we instead turn on only the start cells in the
current active columns and re-generate the predicted state from those.
This looks at:
- infActiveState['t']
This modifies:
- infPredictedState['t']
- colConfidence['t']
- cellConfidence['t']
Parameters:
------------------------------------------------------------------
retval: True if we have a decent guess as to the next input.
Returing False from here indicates to the caller that we have
reached the end of a learned sequence.
"""
# Init to zeros to start
self.infPredictedState['t'].fill(0)
self.cellConfidence['t'].fill(0)
self.colConfidence['t'].fill(0)
# =========================================================================
# Phase 2 - Compute new predicted state and update cell and column
# confidences
for c in xrange(self.numberOfCols):
# For each cell in the column
for i in xrange(self.cellsPerColumn):
# For each segment in the cell
for s in self.cells[c][i]:
# See if it has the min number of active synapses
numActiveSyns = self.getSegmentActivityLevel(s,
self.infActiveState['t'], connectedSynapsesOnly=False)
if numActiveSyns < self.activationThreshold:
continue
# Incorporate the confidence into the owner cell and column
if self.verbosity >= 6:
print "incorporating DC from cell[%d,%d]: " % (c,i),
s.debugPrint()
dc = s.dutyCycle()
self.cellConfidence['t'][c,i] += dc
self.colConfidence['t'][c] += dc
# If we reach threshold on the connected synapses, predict it
# If not active, skip over it
if self.isSegmentActive(s, self.infActiveState['t']):
self.infPredictedState['t'][c,i] = 1
def updateInferenceState(self, activeColumns):
""" Update the inference state. Called from compute() on every iteration
Parameters:
--------------------------------------------------------------
activeColumns: The list of active column indices
"""
# =========================================================================
# Copy t to t-1
self.infActiveState['t-1'][:,:] = self.infActiveState['t'][:,:]
self.infPredictedState['t-1'][:,:] = self.infPredictedState['t'][:,:]
self.cellConfidence['t-1'][:,:] = self.cellConfidence['t'][:,:]
self.colConfidence['t-1'][:] = self.colConfidence['t'][:]
# Each phase will zero/initilize the 't' states that it affects
# -------------------------------------------------------------------
inSequence = self.inferPhase1(activeColumns, self.resetCalled)
# ====================================================================
# Compute the predicted cells and the cell and column confidences
self.inferPhase2()
#############################################################################
  def learnPhase1(self, activeColumns, readOnly=False):
    """ Compute the learning active state given the predicted state and
    the bottom-up input.
    This looks at:
        - lrnActiveState['t-1']
        - lrnPredictedState['t-1']
    This modifies:
        - lrnActiveState['t']
    Parameters:
    ------------------------------------------------------------------
    activeColumns:  list of active bottom-ups
    readOnly:       True if being called from backtracking logic.
                    This tells us not to increment any segment
                    duty cycles or queue up any updates.
    retval:         True if the current input was sufficiently predicted, OR
                    if we started over on startCells.
                    False indicates that the current input was NOT predicted,
                    well enough to consider it as "inSequence"
    """
    # Save previous active state and start out on a clean slate
    self.lrnActiveState['t'].fill(0)
    # --------------------------------------------------------------------
    # For each column, turn on the predicted cell. There will always be at most
    # one predicted cell per column
    numUnpredictedColumns = 0
    for c in activeColumns:
      predictingCells = numpy.where(self.lrnPredictedState['t-1'][c] == 1)[0]
      numPredictedCells = len(predictingCells)
      assert numPredictedCells <= 1
      # If we have a predicted cell, turn it on. The segment's posActivation
      # count will have already been incremented by processSegmentUpdates
      if numPredictedCells == 1:
        i = predictingCells[0]
        self.lrnActiveState['t'][c,i] = 1
        continue
      numUnpredictedColumns += 1
      # In read-only (backtracking) mode we never modify segments, so there
      # is nothing more to do for an unpredicted column
      if readOnly:
        continue
      # ----------------------------------------------------------------
      # If no predicted cell, pick the closest matching one to reinforce, or
      # if none exists, create a new segment on a cell in that column
      i, s, numActive = self.getBestMatchingCell(c, self.lrnActiveState['t-1'],
                            self.minThreshold)
      if s is not None and s.isSequenceSegment():
        if self.verbosity >= 4:
          print "Learn branch 0, found segment match. Learning on col=",c
        self.lrnActiveState['t'][c,i] = 1
        segUpdate = self.getSegmentActiveSynapses(c, i, s,
                          self.lrnActiveState['t-1'], newSynapses = True)
        s.totalActivations += 1
        # This will update the permanences, posActivationsCount, and the
        # lastActiveIteration (age).
        trimSegment = self.adaptSegment(segUpdate)
        if trimSegment:
          self.trimSegmentsInCell(c, i, [s], minPermanence = 0.00001,
                  minNumSyns = 0)
      # If no close match exists, create a new one
      else:
        # Choose a cell in this column to add a new segment to
        i = self.getCellForNewSegment(c)
        if (self.verbosity >= 4):
          print "Learn branch 1, no match. Learning on col=",c,
          print ", newCellIdxInCol=",i
        self.lrnActiveState['t'][c,i] = 1
        segUpdate = self.getSegmentActiveSynapses(c, i, None,
                          self.lrnActiveState['t-1'], newSynapses = True)
        segUpdate.sequenceSegment = True # Make it a sequence segment
        self.adaptSegment(segUpdate)  # No need to check whether perm reached 0
    # ----------------------------------------------------------------------
    # Determine if we are out of sequence or not and reset our PAM counter
    # if we are in sequence
    numBottomUpColumns = len(activeColumns)
    # "In sequence" means fewer than half of the active columns were
    # unpredicted
    if numUnpredictedColumns < numBottomUpColumns/2:
      return True   # in sequence
    else:
      return False  # out of sequence
#############################################################################
  def learnPhase2(self, readOnly=False):
    """ Compute the predicted segments given the current set of active cells.
    This computes the lrnPredictedState['t'] and queues up any segments that
    became active (and the list of active synapses for each segment) into
    the segmentUpdates queue
    Parameters:
    --------------------------------------------------
    readOnly:     True if being called from backtracking logic.
                  This tells us not to increment any segment
                  duty cycles or queue up any updates.
    This looks at:
        - lrnActiveState['t']
        - lrnActiveState['t-1'] (only when doPooling is enabled)
    This modifies:
        - lrnPredictedState['t']
        - segmentUpdates
    """
    # Clear out predicted state to start with
    self.lrnPredictedState['t'].fill(0)
    # ====================================================================
    # Compute new predicted state. When computing predictions for
    # phase 2, we predict at most one cell per column (the one with the best
    # matching segment).
    for c in xrange(self.numberOfCols):
      # Is there a cell predicted to turn on in this column?
      i, s, numActive = self.getBestMatchingCell(c, self.lrnActiveState['t'],
                             minThreshold = self.activationThreshold)
      if i is None:
        continue
      # Turn on the predicted state for the best matching cell and queue
      #  the pertinent segment up for an update, which will get processed if
      #  the cell receives bottom up in the future.
      self.lrnPredictedState['t'][c,i] = 1
      if readOnly:
        continue
      # Queue up this segment for updating; grow new synapses only if the
      # segment is not already at its target synapse count
      segUpdate = self.getSegmentActiveSynapses(c, i, s,
                    activeState=self.lrnActiveState['t'],
                    newSynapses=(numActive < self.newSynapseCount))
      s.totalActivations += 1    # increment totalActivations
      self.addToSegmentUpdates(c, i, segUpdate)
      if self.doPooling:
        # creates a new pooling segment if no best matching segment found
        # sum(all synapses) >= minThreshold, "weak" activation
        predSegment = self.getBestMatchingSegment(c, i,
                                 self.lrnActiveState['t-1'])
        segUpdate = self.getSegmentActiveSynapses(c, i, predSegment,
                         self.lrnActiveState['t-1'], newSynapses=True)
        self.addToSegmentUpdates(c, i, segUpdate)
#############################################################################
  def updateLearningState(self, activeColumns):
    """ Update the learning state. Called from compute() on every iteration.

    Rolls the learn-state arrays forward one time step, applies any queued
    segment updates that received bottom-up confirmation, runs learn phase 1
    (activate cells) and phase 2 (predict cells), and manages the PAM
    (pay-attention-mode) counter / sequence-restart logic.

    This looks at:
        - lrnActiveState['t'], lrnPredictedState['t'] (copied into 't-1')
        - segmentUpdates (via processSegmentUpdates)
    This modifies:
        - lrnActiveState / lrnPredictedState (both 't' and 't-1' slots)
        - pamCounter, learnedSeqLength, segmentUpdates, _prevLrnPatterns

    Parameters:
    --------------------------------------------------
    activeColumns: list of active column indices
    """
    # ====================================================================
    # Copy predicted and active states into t-1
    self.lrnPredictedState['t-1'][:,:] = self.lrnPredictedState['t'][:,:]
    self.lrnActiveState['t-1'][:,:] = self.lrnActiveState['t'][:,:]

    # ====================================================================
    # Process queued up segment updates, now that we have bottom-up, we
    # can update the permanences on the cells that we predicted to turn on
    # and did receive bottom-up
    self.processSegmentUpdates(activeColumns)

    # ====================================================================
    # Phase 1 - turn on the predicted cell in each column that received
    # bottom-up. If there was no predicted cell, pick one to learn to.
    if not self.resetCalled:
      # Uses lrnActiveState['t-1'] and lrnPredictedState['t-1']
      # computes lrnActiveState['t']
      inSequence = self.learnPhase1(activeColumns)

      # Reset our PAM counter if we are in sequence
      if inSequence:
        self.pamCounter = self.pamLength

    # Print status of PAM counter, learned sequence length
    if self.verbosity >= 3:
      print "pamCounter = ", self.pamCounter, "seqLength = ", \
          self.learnedSeqLength

    # ====================================================================
    # Start over on start cells if any of the following occur:
    #  1.) A reset was just called
    #  2.) We have been too long out of sequence (the pamCounter has expired)
    #  3.) We have reached maximum allowed sequence length.
    #
    # Note that, unless we are following a reset, we also just learned or
    # re-enforced connections to the current set of active columns because
    # this input is still a valid prediction to learn.
    #
    # It is especially helpful to learn the connections to this input when
    # you have a maxSeqLength constraint in place. Otherwise, you will have
    # no continuity at all between sub-sequences of length maxSeqLength.
    #
    if self.resetCalled or self.pamCounter == 0 or (self.maxSeqLength != 0 and
        self.learnedSeqLength >= self.maxSeqLength):
      if self.verbosity >= 3:
        if self.resetCalled:
          print "Starting over:", activeColumns, "(reset was called)"
        elif self.pamCounter == 0:
          print "Starting over:", activeColumns, "(PAM counter expired)"
        else:
          print "Starting over:", activeColumns, "(reached maxSeqLength)"

      # Update average learned sequence length - this is a diagnostic statistic
      # (exclude the out-of-sequence tail if the PAM counter expired)
      if self.pamCounter == 0:
        seqLength = self.learnedSeqLength - self.pamLength
      else:
        seqLength = self.learnedSeqLength
      if self.verbosity >= 3:
        print " learned sequence length was:", seqLength
      self._updateAvgLearnedSeqLength(seqLength)

      # Backtrack to an earlier starting point, if we find one
      backSteps = 0
      if not self.resetCalled:
        backSteps = self.learnBacktrack()

      # Start over in the current time step if reset was called, or we couldn't
      # backtrack.
      if self.resetCalled or backSteps == 0:
        self.lrnActiveState['t'].fill(0)
        # Start cells are cell 0 of each active column
        for c in activeColumns:
          self.lrnActiveState['t'][c,0] = 1

        # Remove any old input history patterns
        self._prevLrnPatterns = []

      # Reset PAM counter
      self.pamCounter = self.pamLength
      self.learnedSeqLength = backSteps

      # Clear out any old segment updates from prior sequences
      self.segmentUpdates = {}

    # ====================================================================
    # Phase 2 - Compute new predicted state. When computing predictions for
    # phase 2, we predict at most one cell per column (the one with the best
    # matching segment).
    self.learnPhase2()
#############################################################################
  def compute(self, bottomUpInput, enableLearn, computeInfOutput=None):
    """ Handle one compute, possibly learning.

    Parameters:
    -------------------------------------------------------------
    bottomUpInput:    The bottom-up input, typically from a spatial pooler
    enableLearn:      If true, perform learning
    computeInfOutput: If None, default behavior is to disable the inference
                      output when enableLearn is on.
                      If true, compute the inference output
                      If false, do not compute the inference output
    retval:           the TP output for this time step, as produced by
                      computeOutput()

    It is an error to have both enableLearn and computeInfOutput set to False

    By default, we don't compute the inference output when learning because it
    slows things down, but you can override this by passing in True for
    computeInfOutput
    """
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if computeInfOutput is None:
      if enableLearn:
        computeInfOutput = False
      else:
        computeInfOutput = True

    assert (enableLearn or computeInfOutput)

    # -------------------------------------------------------------------
    # Get the list of columns that have bottom-up
    activeColumns = bottomUpInput.nonzero()[0]
    if enableLearn:
      self.lrnIterationIdx += 1
    self.iterationIdx += 1

    if self.verbosity >= 3:
      print "\n==== PY Iteration: %d =====" % (self.iterationIdx)
      print "Active cols:", activeColumns

    # -------------------------------------------------------------------
    # Update segment duty cycles if we are crossing a "tier"
    # We determine if it's time to update the segment duty cycles. Since the
    # duty cycle calculation is a moving average based on a tiered alpha, it is
    # important that we update all segments on each tier boundary
    if enableLearn:
      if self.lrnIterationIdx in Segment.dutyCycleTiers:
        for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
          for segment in self.cells[c][i]:
            segment.dutyCycle()

    # -------------------------------------------------------------------
    # Update the average input density (moving average of the number of
    # active columns per input)
    if self.avgInputDensity is None:
      self.avgInputDensity = len(activeColumns)
    else:
      self.avgInputDensity = 0.99 * self.avgInputDensity \
                           + 0.01 * len(activeColumns)

    # ====================================================================
    # First, update the inference state
    # As a speed optimization for now (until we need online learning), skip
    # computing the inference output while learning
    if computeInfOutput:
      self.updateInferenceState(activeColumns)

    # ====================================================================
    # Next, update the learning state
    if enableLearn:
      self.updateLearningState(activeColumns)

    # ----------------------------------------------------------------------
    # Apply global decay, and remove synapses and/or segments.
    # Synapses are removed if their permanence value is <= 0.
    # Segments are removed when they don't have synapses anymore.
    # Removal of synapses can trigger removal of whole segments!
    # todo: isolate the synapse/segment retraction logic so that
    # it can be called in adaptSegments, in the case where we
    # do global decay only episodically.
    # (Currently disabled; kept for reference.)
    #    if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0):
    #      for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
    #        segsToDel = [] # collect and remove outside the loop
    #        for segment in self.cells[c][i]:
    #          age = self.lrnIterationIdx - segment.lastActiveIteration
    #          if age <= self.maxAge:
    #            continue
    #          synsToDel = [] # collect and remove outside the loop
    #          for synapse in segment.syns:
    #            synapse[2] = synapse[2] - self.globalDecay # decrease permanence
    #            if synapse[2] <= 0:
    #              synsToDel.append(synapse) # add to list to delete
    #          if len(synsToDel) == segment.getNumSynapses(): # 1 for sequenceSegment flag
    #            segsToDel.append(segment) # will remove the whole segment
    #          elif len(synsToDel) > 0:
    #            for syn in synsToDel: # remove some synapses on segment
    #              segment.syns.remove(syn)
    #        for seg in segsToDel: # remove some segments of this cell
    #          self.cleanUpdatesList(c,i,seg)
    #          self.cells[c][i].remove(seg)

    # ========================================================================
    # Update the prediction score stats
    # Learning always includes inference
    if self.collectStats:
      if computeInfOutput:
        predictedState = self.infPredictedState['t-1']
      else:
        predictedState = self.lrnPredictedState['t-1']
      self._updateStatsInferEnd(self._internalStats,
                                activeColumns,
                                predictedState,
                                self.colConfidence['t-1'])

      # Make trivial predictions and collect stats
      if self.trivialPredictor is not None:
        for m in self.trivialPredictor.methods:
          if computeInfOutput:
            self.trivialPredictor.infer(activeColumns)
          self._updateStatsInferEnd(self.trivialPredictor._internalStats[m],
                                    activeColumns,
                                    self.trivialPredictor.predictedState[m]['t-1'],
                                    self.trivialPredictor.confidence[m]['t-1'])

    # Finally return the TP output
    output = self.computeOutput()

    # Print diagnostic information based on the current verbosity level
    self.printComputeEnd(output, learn=enableLearn)

    self.resetCalled = False
    return output
################################################################################
def infer(self, bottomUpInput):
return self.compute(bottomUpInput, enableLearn=False)
################################################################################
def learn(self, bottomUpInput, computeInfOutput=None):
return self.compute(bottomUpInput, enableLearn=True,
computeInfOutput=computeInfOutput)
#############################################################################
def updateSegmentDutyCycles(self):
""" This gets called on every compute. It determines if it's time to
update the segment duty cycles. Since the duty cycle calculation is a
moving average based on a tiered alpha, it is important that we update
all segments on each tier boundary. """
if self.lrnIterationIdx not in [100, 1000, 10000]:
return
for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
#############################################################################
def columnConfidences(self, cellConfidences=None):
""" Compute the column confidences given the cell confidences. If
None is passed in for cellConfidences, it uses the stored cell confidences
from the last compute.
Parameters:
----------------------------
cellConfidencs : cell confidences to use, or None to use the
the current cell confidences.
retval: : Column confidence scores.
"""
return self.colConfidence['t']
#############################################################################
def topDownCompute(self, topDownIn=None):
""" Top-down compute - generate expected input given output of the TP
Parameters:
----------------------------
topDownIn : top down input from the level above us
retval: : best estimate of the TP input that would have generated
bottomUpOut.
"""
# For now, we will assume there is no one above us and that bottomUpOut is
# simply the output that corresponds to our currently stored column
# confidences.
# Simply return the column confidences
return self.columnConfidences()
################################################################################
def trimSegmentsInCell(self, colIdx, cellIdx, segList, minPermanence,
minNumSyns):
""" This method goes through a list of segments for a given cell and
deletes all synapses whose permanence is less than minPermanence and deletes
any segments that have less than minNumSyns synapses remaining.
Parameters:
--------------------------------------------------------------
colIdx: Column index
cellIdx: cell index within the column
segList: List of segment references
minPermanence: Any syn whose permamence is 0 or < minPermanence will
be deleted.
minNumSyns: Any segment with less than minNumSyns synapses remaining
in it will be deleted.
retval: (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all segments
nSegsRemoved, nSynsRemoved = 0, 0
segsToDel = [] # collect and remove segments outside the loop
for segment in segList:
# List if synapses to delete
synsToDel = [syn for syn in segment.syns if syn[2] < minPermanence]
if len(synsToDel) == len(segment.syns):
segsToDel.append(segment) # will remove the whole segment
else:
if len(synsToDel) > 0:
for syn in synsToDel: # remove some synapses on segment
segment.syns.remove(syn)
nSynsRemoved += 1
if len(segment.syns) < minNumSyns:
segsToDel.append(segment)
# Remove segments that don't have enough synapses and also take them
# out of the segment update list, if they are in there
nSegsRemoved += len(segsToDel)
for seg in segsToDel: # remove some segments of this cell
self.cleanUpdatesList(colIdx, cellIdx, seg)
self.cells[colIdx][cellIdx].remove(seg)
nSynsRemoved += len(seg.syns)
return nSegsRemoved, nSynsRemoved
################################################################################
def trimSegments(self, minPermanence=None, minNumSyns=None):
""" This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
Parameters:
--------------------------------------------------------------
minPermanence: Any syn whose permamence is 0 or < minPermanence will
be deleted. If None is passed in, then
self.connectedPerm is used.
minNumSyns: Any segment with less than minNumSyns synapses remaining
in it will be deleted. If None is passed in, then
self.activationThreshold is used.
retval: (numSegsRemoved, numSynsRemoved)
"""
# Fill in defaults
if minPermanence is None:
minPermanence = self.connectedPerm
if minNumSyns is None:
minNumSyns = self.activationThreshold
# Loop through all cells
totalSegsRemoved, totalSynsRemoved = 0, 0
for c,i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
(segsRemoved, synsRemoved) = self.trimSegmentsInCell(colIdx=c, cellIdx=i,
segList=self.cells[c][i], minPermanence=minPermanence,
minNumSyns=minNumSyns)
totalSegsRemoved += segsRemoved
totalSynsRemoved += synsRemoved
# Print all cells if verbosity says to
if self.verbosity >= 5:
print "Cells, all segments:"
self.printCells(predictedOnly=False)
return totalSegsRemoved, totalSynsRemoved
################################################################################
def cleanUpdatesList(self, col, cellIdx, seg):
"""
Removes any update that would be for the given col, cellIdx, segIdx.
NOTE: logically, we need to do this when we delete segments, so that if
an update refers to a segment that was just deleted, we also remove
that update from the update list. However, I haven't seen it trigger
in any of the unit tests yet, so it might mean that it's not needed
and that situation doesn't occur, by construction.
todo: check if that situation occurs.
"""
for key, updateList in self.segmentUpdates.iteritems():
c,i = key[0], key[1]
if c == col and i == cellIdx:
for update in updateList:
if update[1].segment == seg:
self.removeSegmentUpdate(update)
################################################################################
def finishLearning(self):
"""Called when learning has been completed. This method just calls
trimSegments. (finishLearning is here for backward compatibility)
"""
# Keep weakly formed synapses around because they contain confidence scores
# for paths out of learned sequenced and produce a better prediction than
# chance.
self.trimSegments(minPermanence=0.0001)
# Update all cached duty cycles for better performance right after loading
# in the trained network.
for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):
for segment in self.cells[c][i]:
segment.dutyCycle()
# For error checking purposes, make sure no start cell has incoming connections
if self.cellsPerColumn > 1:
for c in xrange(self.numberOfCols):
assert self.getNumSegmentsInCell(c, 0) == 0
################################################################################
def checkPrediction2(self, patternNZs, output=None, colConfidence=None,
details=False):
"""
This function will replace checkPrediction.
This function produces goodness-of-match scores for a set of input patterns, by
checking for their presense in the current and predicted output of the TP.
Returns a global count of the number of extra and missing bits, the
confidence scores for each input pattern, and (if requested) the
bits in each input pattern that were not present in the TP's prediction.
todo: Add option to check predictedState only.
Parameters:
==========
patternNZs: a list of input patterns that we want to check for. Each element
is a list of the non-zeros in that pattern.
output: The output of the TP. If not specified, then use the
TP's current output. This can be specified if you are
trying to check the prediction metric for an output from
the past.
colConfidence: The column confidences. If not specified, then use the
TP's current self.colConfidence. This can be specified if you are
trying to check the prediction metrics for an output
from the past.
details: if True, also include details of missing bits per pattern.
Return value:
============
The following list is returned:
[
totalExtras,
totalMissing,
[conf_1, conf_2, ...],
[missing1, missing2, ...]
]
totalExtras: a global count of the number of 'extras', i.e. bits that
are on in the current output but not in the or of all the
passed in patterns
totalMissing: a global count of all the missing bits, i.e. the bits that
are on in the or of the patterns, but not in the current
output
conf_i the confidence score for the i'th pattern in patternsToCheck
This consists of 3 items as a tuple:
(predictionScore, posPredictionScore, negPredictionScore)
missing_i the bits in the i'th pattern that were missing
in the output. This list is only returned if details is
True.
"""
# Get the non-zeros in each pattern
numPatterns = len(patternNZs)
# Compute the union of all the expected patterns
orAll = set()
orAll = orAll.union(*patternNZs)
# Get the list of active columns in the output
if output is None:
assert self.currentOutput is not None
output = self.currentOutput
output = set(output.sum(axis=1).nonzero()[0])
# Compute the total extra and missing in the output
totalExtras = len(output.difference(orAll))
totalMissing = len(orAll.difference(output))
# Get the percent confidence level per column by summing the confidence levels
# of the cells in the column. During training, each segment's confidence
# number is computed as a running average of how often it correctly
# predicted bottom-up activity on that column. A cell's confidence number
# is taken from the first active segment found in the cell. Note that
# confidence will only be non-zero for predicted columns.
if colConfidence is None:
colConfidence = self.colConfidence['t']
# Assign confidences to each pattern
confidences = []
for i in xrange(numPatterns):
# Sum of the column confidences for this pattern
positivePredictionSum = colConfidence[patternNZs[i]].sum()
# How many columns in this pattern
positiveColumnCount = len(patternNZs[i])
# Sum of all the column confidences
totalPredictionSum = colConfidence.sum()
# Total number of columns
totalColumnCount = len(colConfidence)
negativePredictionSum = totalPredictionSum - positivePredictionSum
negativeColumnCount = totalColumnCount - positiveColumnCount
# Compute the average confidence score per column for this pattern
if positiveColumnCount != 0:
positivePredictionScore = positivePredictionSum
else:
positivePredictionScore = 0.0
# Compute the average confidence score per column for the other patterns
if negativeColumnCount != 0:
negativePredictionScore = negativePredictionSum
else:
negativePredictionScore = 0.0
# Scale the positive and negative prediction scores so that they sum to 1.0
currentSum = negativePredictionScore + positivePredictionScore
if currentSum > 0:
positivePredictionScore *= 1.0/currentSum
negativePredictionScore *= 1.0/currentSum
predictionScore = positivePredictionScore - negativePredictionScore
confidences.append((predictionScore,
positivePredictionScore,
negativePredictionScore))
# Include detail? (bits in each pattern that were missing from the output)
if details:
missingPatternBits = [set(pattern).difference(output) \
for pattern in patternNZs]
return (totalExtras, totalMissing, confidences, missingPatternBits)
else:
return (totalExtras, totalMissing, confidences)
#############################################################################
def isSegmentActive(self, seg, activeState):
"""
A segment is active if it has >= activationThreshold connected
synapses that are active due to activeState.
Notes: studied various cutoffs, none of which seem to be worthwhile
list comprehension didn't help either
"""
# Computing in C - *much* faster
return isSegmentActive(seg.syns, activeState,
self.connectedPerm, self.activationThreshold)
#############################################################################
def getSegmentActivityLevel(self, seg, activeState,
connectedSynapsesOnly = False):
"""This routine computes the activity level of a segment given activeState.
It can tally up only connected synapses (permanence >= connectedPerm), or
all the synapses of the segment, at either t or t-1.
"""
# Computing in C - *much* faster
return getSegmentActivityLevel(seg.syns, activeState, connectedSynapsesOnly,
self.connectedPerm)
#############################################################################
def getBestMatchingCell(self, c, activeState, minThreshold):
"""Find weakly activated cell in column with at least minThreshold active
synapses.
Parameters:
--------------------------------------------------------------
c: which column to look at
activeState: the active cells
minThreshold: minimum number of synapses required
retval: (cellIdx, segment, numActiveSynapses)
"""
# Collect all cells in column c that have at least minThreshold in the most
# activated segment
bestActivityInCol = minThreshold
bestSegIdxInCol = -1
bestCellInCol = -1
for i in xrange(self.cellsPerColumn):
maxSegActivity = 0
maxSegIdx = 0
for j,s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState)
if activity > maxSegActivity:
maxSegActivity = activity
maxSegIdx = j
if maxSegActivity >= bestActivityInCol:
bestActivityInCol = maxSegActivity
bestSegIdxInCol = maxSegIdx
bestCellInCol = i
if bestCellInCol == -1:
return (None, None, None)
else:
return (bestCellInCol, self.cells[c][bestCellInCol][bestSegIdxInCol],
bestActivityInCol)
#############################################################################
def getBestMatchingSegment(self, c, i, activeState):
"""For the given cell, find the segment with the largest number of active
synapses. This routine is aggressive in finding the best match. The
permanence value of synapses is allowed to be below connectedPerm. The number
of active synapses is allowed to be below activationThreshold, but must be
above minThreshold. The routine returns the segment index. If no segments are
found, then an index of -1 is returned.
"""
maxActivity, which = self.minThreshold, -1
for j,s in enumerate(self.cells[c][i]):
activity = self.getSegmentActivityLevel(s, activeState,
connectedSynapsesOnly=False)
if activity >= maxActivity:
maxActivity, which = activity, j
if which == -1:
return None
else:
return self.cells[c][i][which]
#############################################################################
  def getCellForNewSegment(self, colIdx):
    """ Return the index of a cell in this column which is a good candidate
    for adding a new segment.

    When we have fixed size resources in effect, we insure that we pick a
    cell which does not already have the max number of allowed segments. If
    none exists, we choose the least used segment in the column to re-allocate.

    Parameters:
    --------------------------------------------------------------
    colIdx: which column to look at
    retval: cellIdx
    """
    # Not fixed size CLA, just choose a cell randomly
    if self.maxSegmentsPerCell < 0:
      if self.cellsPerColumn > 1:
        # Don't ever choose the start cell (cell # 0) in each column
        i = self._random.getUInt32(self.cellsPerColumn-1) + 1
      else:
        i = 0
      return i

    # ---------------------------------------------------------------------
    # Fixed size CLA, choose from among the cells that are below the maximum
    # number of segments.
    # NOTE: It is important NOT to always pick the cell with the fewest number
    # of segments. The reason is that if we always do that, we are more likely
    # to run into situations where we choose the same set of cell indices to
    # represent an 'A' in both context 1 and context 2. This is because the
    # cell indices we choose in each column of a pattern will advance in
    # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
    candidateCellIdxs = []
    if self.cellsPerColumn == 1:
      minIdx = 0
      maxIdx = 0
    else:
      minIdx = 1    # Don't include startCell in the mix
      maxIdx = self.cellsPerColumn-1
    for i in xrange(minIdx, maxIdx+1):
      numSegs = len(self.cells[colIdx][i])
      if numSegs < self.maxSegmentsPerCell:
        candidateCellIdxs.append(i)

    # If we found one, return with it. Note we need to use _random to maintain
    # correspondence with CPP code (the RNG call sequence must match).
    if len(candidateCellIdxs) > 0:
      #candidateCellIdx = random.choice(candidateCellIdxs)
      candidateCellIdx = \
          candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))]
      if self.verbosity >= 5:
        print "Cell [%d,%d] chosen for new segment, # of segs is %d" \
              % (colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
      return candidateCellIdx

    # ---------------------------------------------------------------------
    # All cells in the column are full, find a segment to free up
    candidateSegment = None
    candidateSegmentDC = 1.0
    # For each cell in this column
    for i in xrange(minIdx, maxIdx+1):
      # For each segment in this cell
      for s in self.cells[colIdx][i]:
        dc = s.dutyCycle()
        if dc < candidateSegmentDC:
          candidateCellIdx = i
          candidateSegmentDC = dc
          candidateSegment = s

    # NOTE(review): if every segment's dutyCycle() came back >= 1.0,
    # candidateSegment would remain None and the statements below would
    # raise AttributeError — presumably dutyCycle() is always < 1.0 for at
    # least one segment when we reach here; confirm against Segment.dutyCycle.
    # Free up the least used segment
    if self.verbosity >= 5:
      print "Deleting segment #%d for cell[%d,%d] to make room for new segment" \
            % (candidateSegment.segID, colIdx, candidateCellIdx)
      candidateSegment.debugPrint()
    self.cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
    self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
    return candidateCellIdx
##############################################################################
  def getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
    """Return a segmentUpdate data structure containing a list of proposed
    changes to segment s.

    Let activeSynapses be the list of active synapses where the originating
    cells have their activeState output = 1 at time step t. (This list is
    empty if s is None since the segment doesn't exist.)

    newSynapses is an optional argument that defaults to false. If newSynapses
    is true, then newSynapseCount - len(activeSynapses) synapses are added to
    activeSynapses. These synapses are randomly chosen from the set of cells
    that have learnState = 1 at timeStep.

    Parameters:
    --------------------------------------------------------------
    c, i:        column / cell index of the owner cell
    s:           the segment to update, or None when creating a new segment
    activeState: 2-D (column, cell) activity array used to find active
                 synapses and new-synapse sources
    newSynapses: if True, top up the list with new source cells
    retval:      a SegmentUpdate instance
    """
    activeSynapses = []
    if s is not None: # s can be None, if adding a new segment
      # Here we add *integers* to activeSynapses
      activeSynapses = [idx for idx, syn in enumerate(s.syns) \
                        if activeState[syn[0], syn[1]]]

    if newSynapses: # add a few more synapses
      nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
      # Here we add *pairs* (colIdx, cellIdx) to activeSynapses
      activeSynapses += self.chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
                                                    activeState)

    # It's still possible that activeSynapses is empty, and this will
    # be handled in addToSegmentUpdates

    # NOTE: activeSynapses contains a mixture of integers and pairs of integers
    # - integers are indices of synapses already existing on the segment,
    #   that we will need to update.
    # - pairs represent source (colIdx, cellIdx) of new synapses to create on
    #   the segment
    # NOTE(review): this references TP.SegmentUpdate, but 'TP' is not visibly
    # imported in this module's header — confirm it is imported or defined
    # elsewhere in the file.
    update = TP.SegmentUpdate(c, i, s, activeSynapses)

    return update
##############################################################################
def chooseCellsToLearnFrom(self, c, i, s, n, activeState):
"""Choose n random cells to learn from.
This function is called several times while learning with timeStep = t-1, so
we cache the set of candidates for that case. It's also called once with
timeStep = t, and we cache that set of candidates.
Returns tuples of (column index, cell index).
"""
if n <= 0:
return []
tmpCandidates = numpy.where(activeState == 1)
# Candidates can be empty at this point, in which case we return
# an empty segment list. adaptSegments will do nothing when getting
# that list.
if len(tmpCandidates[0]) == 0:
return []
if s is None: # new segment
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1])]
else:
# We exclude any synapse that is already in this segment.
synapsesAlreadyInSegment = set((syn[0], syn[1]) for syn in s.syns)
cands = [syn for syn in zip(tmpCandidates[0], tmpCandidates[1]) \
if (syn[0], syn[1]) not in synapsesAlreadyInSegment]
# If we have no more candidates than requested, return all of them,
# no shuffle necessary.
if len(cands) <= n:
return cands
if n == 1: # so that we don't shuffle if only one is needed
idx = self._random.getUInt32(len(cands))
return [cands[idx]] # col and cell idx in col
# If we need more than one candidate
indices = array([j for j in range(len(cands))], dtype='uint32')
tmp = zeros(min(n, len(indices)), dtype='uint32')
self._random.getUInt32Sample(indices, tmp, True)
return [cands[j] for j in tmp]
################################################################################
  def processSegmentUpdates(self, activeColumns):
    """ Go through the list of accumulated segment updates and process them
    as follows:

    if the segment update is too old, remove the update
    else if the cell received bottom-up, update its permanences
    else if it's still being predicted, leave it in the queue
    else remove it

    Parameters:
    --------------------------------------------------
    activeColumns: list of column indices that received bottom-up this
                   iteration
    """
    # =================================================================
    # The segmentUpdates dict has keys which are the column,cellIdx of the
    # owner cell. The values are lists of segment updates for that cell
    removeKeys = []
    trimSegments = []
    for key, updateList in self.segmentUpdates.iteritems():

      # Get the column number and cell index of the owner cell
      c, i = key[0], key[1]

      # If the cell received bottom-up, update its segments
      if c in activeColumns:
        action = 'update'

      # If not, either keep it around if it's still predicted, or remove it
      else:
        # If it is still predicted, and we are pooling, keep it around
        if self.doPooling and self.lrnPredictedState['t'][c,i] == 1:
          action = 'keep'
        else:
          action = 'remove'

      # Process each segment for this cell. Each segment entry contains
      # [creationDate, SegmentInfo]
      updateListKeep = []
      if action != 'remove':
        for (createDate, segUpdate) in updateList:

          if self.verbosity >= 4:
            print "_nLrnIterations =", self.lrnIterationIdx,
            print segUpdate

          # If this segment has expired. Ignore this update (and hence remove
          # it from list)
          if self.lrnIterationIdx - createDate > self.segUpdateValidDuration:
            continue

          if action == 'update':
            # Apply the update; adaptSegment returns True if any synapse
            # was decremented to 0 and the segment needs trimming
            trimSegment = self.adaptSegment(segUpdate)
            if trimSegment:
              trimSegments.append((segUpdate.columnIdx, segUpdate.cellIdx,
                                   segUpdate.segment))
          else:
            # Keep segments that haven't expired yet (the cell is still being
            # predicted)
            updateListKeep.append((createDate, segUpdate))

      self.segmentUpdates[key] = updateListKeep
      if len(updateListKeep) == 0:
        removeKeys.append(key)

    # =====================================================================
    # Clean out empty segment updates
    for key in removeKeys:
      self.segmentUpdates.pop(key)

    # =====================================================================
    # Trim segments that had synapses go to 0
    for (c, i, segment) in trimSegments:
      self.trimSegmentsInCell(c, i, [segment], minPermanence = 0.00001,
                              minNumSyns = 0)
################################################################################
  def adaptSegment(self, segUpdate):
    """This function applies segment update information to a segment in a
    cell.

    Synapses on the active list get their permanence counts incremented by
    permanenceInc. All other synapses get their permanence counts decremented
    by permanenceDec.

    We also increment the positiveActivations count of the segment.

    Parameters:
    -----------------------------------------------------------
    segUpdate: SegmentUpdate instance holding the owner cell coordinates,
               the target segment (None when a new segment must be created)
               and the list of active synapses / synapses to create
    retval:    True if some synapses were decremented to 0
               and the segment is a candidate for trimming
    """
    # This will be set to True if detect that any syapses were decremented to
    # 0
    trimSegment = False
    # segUpdate.segment is None when creating a new segment
    c, i, segment = segUpdate.columnIdx, segUpdate.cellIdx, segUpdate.segment
    # update.activeSynapses can be empty.
    # If not, it can contain either or both integers and tuples.
    # The integers are indices of synapses to update.
    # The tuples represent new synapses to create (src col, src cell in col).
    # We pre-process to separate these various element types.
    # synToCreate is not empty only if positiveReinforcement is True.
    # NOTE: the synapse indices start at *1* to skip the segment flags.
    activeSynapses = segUpdate.activeSynapses
    synToUpdate = set([syn for syn in activeSynapses if type(syn) == int])
    # ------------------------------------------------------------------
    # Modify an existing segment
    if segment is not None:
      if self.verbosity >= 4:
        print "Reinforcing segment #%d for cell[%d,%d]" % (segment.segID, c, i)
        print "  before:",
        segment.debugPrint()
      # Mark it as recently useful
      segment.lastActiveIteration = self.lrnIterationIdx
      # Update frequency and positiveActivations
      segment.positiveActivations += 1       # positiveActivations += 1
      # Refresh the moving-average duty cycle with a positive event.
      segment.dutyCycle(active=True)
      # First, decrement synapses that are not active
      # s is a synapse *index*, with index 0 in the segment being the tuple
      # (segId, sequence segment flag). See below, creation of segments.
      lastSynIndex = len(segment.syns) - 1
      inactiveSynIndices = [s for s in xrange(0, lastSynIndex+1) \
                            if s not in synToUpdate]
      # updateSynapses returns True if any permanence hit 0 (trim candidate).
      trimSegment = segment.updateSynapses(inactiveSynIndices, -self.permanenceDec)
      # Now, increment active synapses
      activeSynIndices = [syn for syn in synToUpdate if syn <= lastSynIndex]
      segment.updateSynapses(activeSynIndices, self.permanenceInc)
      # Finally, create new synapses if needed
      # syn is now a tuple (src col, src cell)
      synsToAdd = [syn for syn in activeSynapses if type(syn) != int]
      # If we have fixed resources, get rid of some old syns if necessary
      if self.maxSynapsesPerSegment > 0 \
          and len(synsToAdd) + len(segment.syns) > self.maxSynapsesPerSegment:
        numToFree = len(segment.syns) + len(synsToAdd) - self.maxSynapsesPerSegment
        # Prefer evicting low-permanence inactive synapses first.
        segment.freeNSynapses(numToFree, inactiveSynIndices, self.verbosity)
      for newSyn in synsToAdd:
        segment.addSynapse(newSyn[0], newSyn[1], self.initialPerm)
      if self.verbosity >= 4:
        print "   after:",
        segment.debugPrint()
    # ------------------------------------------------------------------
    # Create a new segment
    else:
      # (segID, sequenceSegment flag, frequency, positiveActivations,
      #  totalActivations, lastActiveIteration)
      newSegment = Segment(tp=self, isSequenceSeg=segUpdate.sequenceSegment)
      # numpy.float32 important so that we can match with C++
      # By contract, every entry is a (src col, src cell) tuple here.
      for synapse in activeSynapses:
        newSegment.addSynapse(synapse[0], synapse[1], self.initialPerm)
      if self.verbosity >= 3:
        print "New segment #%d for cell[%d,%d]" % (self.segID-1, c, i),
        newSegment.debugPrint()
      self.cells[c][i].append(newSegment)
    return trimSegment
################################################################################
def getSegmentInfo(self, collectActiveData = False):
"""Returns information about the distribution of segments, synapses and
permanence values in the current TP. If requested, also returns information
regarding the number of currently active segments and synapses.
The method returns the following tuple:
(
nSegments, # total number of segments
nSynapses, # total number of synapses
nActiveSegs, # total no. of active segments
nActiveSynapses, # total no. of active synapses
distSegSizes, # a dict where d[n] = number of segments with n synapses
distNSegsPerCell, # a dict where d[n] = number of cells with n segments
distPermValues, # a dict where d[p] = number of synapses with perm = p/10
distAges, # a list of tuples (ageRange, numSegments)
)
nActiveSegs and nActiveSynapses are 0 if collectActiveData is False
"""
nSegments, nSynapses = 0, 0
nActiveSegs, nActiveSynapses = 0, 0
distSegSizes, distNSegsPerCell = {}, {}
distPermValues = {} # Num synapses with given permanence values
numAgeBuckets = 20
distAges = []
ageBucketSize = int((self.lrnIterationIdx+20) / 20)
for i in range(numAgeBuckets):
distAges.append(['%d-%d' % (i*ageBucketSize, (i+1)*ageBucketSize-1), 0])
for c in xrange(self.numberOfCols):
for i in xrange(self.cellsPerColumn):
if len(self.cells[c][i]) > 0:
nSegmentsThisCell = len(self.cells[c][i])
nSegments += nSegmentsThisCell
if distNSegsPerCell.has_key(nSegmentsThisCell):
distNSegsPerCell[nSegmentsThisCell] += 1
else:
distNSegsPerCell[nSegmentsThisCell] = 1
for seg in self.cells[c][i]:
nSynapsesThisSeg = seg.getNumSynapses()
nSynapses += nSynapsesThisSeg
if distSegSizes.has_key(nSynapsesThisSeg):
distSegSizes[nSynapsesThisSeg] += 1
else:
distSegSizes[nSynapsesThisSeg] = 1
# Accumulate permanence value histogram
for syn in seg.syns:
p = int(syn[2]*10)
if distPermValues.has_key(p):
distPermValues[p] += 1
else:
distPermValues[p] = 1
# Accumulate segment age histogram
age = self.lrnIterationIdx - seg.lastActiveIteration
ageBucket = int(age/ageBucketSize)
distAges[ageBucket][1] += 1
# Get active synapse statistics if requested
if collectActiveData:
if self.isSegmentActive(seg, self.infActiveState['t']):
nActiveSegs += 1
for syn in seg.syns:
if self.activeState['t'][syn[0]][syn[1]] == 1:
nActiveSynapses += 1
return (nSegments, nSynapses, nActiveSegs, nActiveSynapses,\
distSegSizes, distNSegsPerCell, distPermValues, distAges)
################################################################################
################################################################################
class Segment():
  """
  The Segment class is a container for all of the segment variables and
  the synapses it owns.

  Each synapse is stored as a 3-element list [srcCellCol, srcCellIdx,
  permanence]; the owning TP instance is kept as self.tp so the segment can
  read the global learning-iteration counter and permanence limits.
  """
  # These are iteration count tiers used when computing segment duty cycle
  dutyCycleTiers = [0, 100, 320, 1000,
                    3200, 10000, 32000, 100000,
                    320000]
  # This is the alpha used in each tier. dutyCycleAlphas[n] is used when
  # iterationIdx > dutyCycleTiers[n]
  dutyCycleAlphas = [None, 0.0032, 0.0010, 0.00032,
                     0.00010, 0.000032, 0.00001, 0.0000032,
                     0.0000010]
  ###############################
  def __init__(self, tp, isSequenceSeg):
    """Create an empty segment owned by TP instance `tp`.

    Parameters:
    ----------------------------------------------------------
    tp:            the owning TP; also supplies the globally unique segID
    isSequenceSeg: True if this segment predicts the very next time step
    """
    self.tp = tp
    self.segID = tp.segID
    tp.segID += 1
    self.isSequenceSeg = isSequenceSeg
    self.lastActiveIteration = tp.lrnIterationIdx
    self.positiveActivations = 1
    self.totalActivations = 1
    # These are internal variables used to compute the positive activations
    # duty cycle.
    # Callers should use dutyCycle()
    # NOTE(review): division assumes tp.lrnIterationIdx > 0 when a segment is
    # first created — confirm against the TP's iteration bookkeeping.
    self._lastPosDutyCycle = 1.0 / tp.lrnIterationIdx
    self._lastPosDutyCycleIteration = tp.lrnIterationIdx
    # Each synapse is a tuple (srcCellCol, srcCellIdx, permanence)
    self.syns = []
  ###############################
  def dutyCycle(self, active=False, readOnly=False):
    """ Compute/update and return the positive activations duty cycle of
    this segment. This is a measure of how often this segment is
    providing good predictions.

    Parameters:
    ----------------------------------------------------------
    active:   True if segment just provided a good prediction
    readOnly: If True, compute the updated duty cycle, but don't change
              the cached value. This is used by debugging print statements.

    NOTE: This method relies on different schemes to compute the duty cycle
    based on how much history we have. In order to support this tiered
    approach IT MUST BE CALLED ON EVERY SEGMENT AT EACH DUTY CYCLE TIER
    (self.dutyCycleTiers)

    When we don't have a lot of history yet (first tier), we simply return
    number of positive activations / total number of iterations

    After a certain number of iterations have accumulated, it converts into
    a moving average calcuation, which is updated only when requested
    since it can be a bit expensive to compute on every iteration (it uses
    the pow() function).

    The duty cycle is computed as follows:

        dc[t] = (1-alpha) * dc[t-1] + alpha * value[t]

    If the value[t] has been 0 for a number of steps in a row, you can apply
    all of the updates at once using:

        dc[t] = (1-alpha)^(t-lastT) * dc[lastT]

    We use the alphas and tiers as defined in self.dutyCycleAlphas and
    self.dutyCycleTiers.
    """
    # For tier #0, compute it from total number of positive activations seen
    if self.tp.lrnIterationIdx <= self.dutyCycleTiers[1]:
      dutyCycle = float(self.positiveActivations) \
                    / self.tp.lrnIterationIdx
      if not readOnly:
        self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
        self._lastPosDutyCycle = dutyCycle
      return dutyCycle
    # How old is our update?
    age = self.tp.lrnIterationIdx - self._lastPosDutyCycleIteration
    # If it's already up to date, we can returned our cached value.
    if age == 0 and not active:
      return self._lastPosDutyCycle
    # Figure out which alpha we're using: the highest tier whose threshold
    # the current iteration index has passed.
    for tierIdx in range(len(self.dutyCycleTiers)-1, 0, -1):
      if self.tp.lrnIterationIdx > self.dutyCycleTiers[tierIdx]:
        alpha = self.dutyCycleAlphas[tierIdx]
        break
    #print "iterationIdx", self.tp.lrnIterationIdx, ", alpha", alpha
    # Update duty cycle: apply `age` iterations of decay in one step, then
    # add the contribution of the current positive event if any.
    dutyCycle = pow(1.0-alpha, age) * self._lastPosDutyCycle
    if active:
      dutyCycle += alpha
    # Update cached values if not read-only
    if not readOnly:
      self._lastPosDutyCycleIteration = self.tp.lrnIterationIdx
      self._lastPosDutyCycle = dutyCycle
    return dutyCycle
  ###############################
  def debugPrint(self):
    """ Print segment information for verbose messaging and debugging.
    This uses the following format:

     ID:54413 True 0.64801 (24/36) 101 [9,1]0.75 [10,1]0.75 [11,1]0.75

    where:
        54413 - is the unique segment id
        True - is sequence segment
        0.64801 - moving average duty cycle
        (24/36) - (numPositiveActivations / numTotalActivations)
        101 - age, number of iterations since last activated
        [9,1]0.75 - synapse from column 9, cell #1, strength 0.75
        [10,1]0.75 - synapse from column 10, cell #1, strength 0.75
        [11,1]0.75 - synapse from column 11, cell #1, strength 0.75
    """
    # Segment ID
    print "ID:%-5d" % (self.segID),
    # Sequence segment or pooling segment
    if self.isSequenceSeg:
      print "True",
    else:
      print "False",
    # Duty cycle (readOnly so printing never perturbs the cached value)
    print "%9.7f" % (self.dutyCycle(readOnly=True)),
    # numPositive/totalActivations
    print "(%4d/%-4d)" % (self.positiveActivations,
                          self.totalActivations),
    # Age
    print "%4d" % (self.tp.lrnIterationIdx - self.lastActiveIteration),
    # Print each synapses on this segment as: srcCellCol/srcCellIdx/perm
    # if the permanence is above connected, put [] around the synapse info
    # For aid in comparing to the C++ implementation, print them in sorted
    # order
    sortedSyns = sorted(self.syns)
    for i,synapse in enumerate(sortedSyns):
      print "[%d,%d]%4.2f" % (synapse[0], synapse[1], synapse[2]),
    print
  ################################################################################
  def isSequenceSegment(self):
    """Return True if this segment predicts the next time step directly."""
    return self.isSequenceSeg
  ################################################################################
  def getNumSynapses(self):
    """Return the number of synapses currently on this segment."""
    return len(self.syns)
  ################################################################################
  def freeNSynapses(self, numToFree, inactiveSynapseIndices, verbosity= 0):
    """Free up some synapses in this segment. We always free up inactive synapses
    (lowest permanence freed up first) before we start to free up active ones.

    Parameters:
    --------------------------------------------------------------------
    numToFree:              number of synapses to free up
    inactiveSynapseIndices: list of the inactive synapse indices.
    """
    # Make sure numToFree isn't larger than the total number of syns we have
    assert (numToFree <= len(self.syns))
    if (verbosity >= 4):
      print "\nIn PY freeNSynapses with numToFree =", numToFree,
      print "inactiveSynapseIndices =",
      for i in inactiveSynapseIndices: print self.syns[i][0:2],
      print
    # Remove the lowest perm inactive synapses first
    if len(inactiveSynapseIndices) > 0:
      perms = numpy.array([self.syns[i][2] for i in inactiveSynapseIndices])
      candidates = numpy.array(inactiveSynapseIndices)[perms.argsort()[0:numToFree]]
      candidates = list(candidates)
    else:
      candidates = []
    # Do we need more? if so, remove the lowest perm active synapses too
    if len(candidates) < numToFree:
      activeSynIndices = [i for i in xrange(len(self.syns)) \
                          if i not in inactiveSynapseIndices]
      perms = numpy.array([self.syns[i][2] for i in activeSynIndices])
      moreToFree = numToFree - len(candidates)
      moreCandidates = numpy.array(activeSynIndices)[perms.argsort()[0:moreToFree]]
      candidates += list(moreCandidates)
    if verbosity >= 4:
      print "Deleting %d synapses from segment to make room for new ones:" \
            % (len(candidates)), candidates
      print "BEFORE:",
      self.debugPrint()
    # Free up all the candidates now
    # (delete by value, not index, so earlier removals can't shift indices)
    synsToDelete = [self.syns[i] for i in candidates]
    for syn in synsToDelete:
      self.syns.remove(syn)
    if verbosity >= 4:
      print "AFTER:",
      self.debugPrint()
  ################################################################################
  def addSynapse(self, srcCellCol, srcCellIdx, perm):
    """Add a new synapse

    Parameters:
    --------------------------------------------------------------------
    srcCellCol: source cell column
    srcCellIdx: source cell index within the column
    perm:       initial permanence
    """
    # numpy.float32 keeps the permanence representation bit-compatible with
    # the C++ implementation.
    self.syns.append([int(srcCellCol), int(srcCellIdx), numpy.float32(perm)])
  ################################################################################
  def updateSynapses(self, synapses, delta):
    """Update a set of synapses in the segment.

    Parameters:
    --------------------------------------------------------------------
    tp:       The owner TP
    synapses: List of synapse indices to update
    delta:    How much to add to each permanence
    retval:   True if synapse reached 0
    """
    reached0 = False
    if delta > 0:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
        # Cap synapse permanence at permanenceMax
        if newValue > self.tp.permanenceMax:
          self.syns[synapse][2] = self.tp.permanenceMax
    else:
      for synapse in synapses:
        self.syns[synapse][2] = newValue = self.syns[synapse][2] + delta
        # Cap min synapse permanence to 0 in case there is no global decay
        if newValue <= 0:
          self.syns[synapse][2] = 0
          reached0 = True
    return reached0
|
gpl-3.0
|
AdamRTomkins/libSpineML
|
libSpineML/smlUtils.py
|
1
|
10142
|
"""
A script to convert the drosophila connectome into SpineML
This build upon the pure data to add in the required infered network components:
# Install libSpineML from source
# https://github.com/AdamRTomkins/libSpineML
"""
from __future__ import division
from libSpineML import smlExperiment as exp
from libSpineML import smlNetwork as net
from libSpineML import smlComponent as com
import csv
import sys
import cStringIO
import graphviz as gv
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import copy
# Column order of the whitespace-delimited neuron description file.
neuron_fieldnames = ['neuron_name', 'innv_neuropil', 'mem_model', 'resting_pot', 'reset_pot', 'threshold_pot', 'rfact_period', 'Cm', 'tm']
# Subset of the columns that become SpineML neuron-body properties.
neuron_property_list = ['resting_pot', 'reset_pot', 'threshold_pot', 'rfact_period', 'Cm', 'tm']
# Default parameter sets keyed by membrane-model name.
default_neuron_models ={}
default_neuron_models['LIF'] = {'resting_pot' :-60,
                                'reset_pot' :-70,
                                'threshold_pot' :-10,
                                'rfact_period' :0,
                                'Cm' :10,
                                'tm' :10
                               }
# NOTE(review): empty — looking up any ESN parameter in
# create_spineml_network would raise KeyError; confirm whether ESN-model
# neurons ever appear in the input data.
default_neuron_models['ESN'] = {}
def process_files(neuron_file, synapse_file):
    """Parse the neuron and synapse text files into network structures.

    Returns a 3-tuple (neurons, populations, projections) where:
      neurons      maps neuron name -> its row dict (plus an 'index' entry,
                   the neuron's position within its LPU population)
      populations  maps LPU name -> ordered list of neuron names
      projections  maps pre-LPU -> post-LPU -> list of (pre_idx, post_idx)
    """
    neuron_rows = csv.DictReader(open(neuron_file),
                                 fieldnames=neuron_fieldnames, delimiter=' ')
    synapse_rows = csv.DictReader(open(synapse_file),
                                  fieldnames=synapse_fieldnames, delimiter=' ')
    neurons = {}
    populations = {}
    projections = {}
    # First pass: group neurons by the neuropil (LPU) they innervate and
    # remember each neuron's index within that group.
    for record in neuron_rows:
        lpu = record['innv_neuropil']
        name = record['neuron_name']
        populations.setdefault(lpu, []).append(name)
        neurons[name] = record
        neurons[name]['index'] = len(populations[lpu]) - 1
    # Second pass: translate each synapse row into an (index, index) pair
    # filed under its (pre-LPU, post-LPU) projection.
    for record in synapse_rows:
        src_name = record['pre-neuron']
        dst_name = record['post-neuron']
        src_lpu = neurons[src_name]['innv_neuropil']
        src_index = neurons[src_name]['index']
        dst_lpu = neurons[dst_name]['innv_neuropil']
        dst_index = neurons[dst_name]['index']
        projections.setdefault(src_lpu, {}) \
                   .setdefault(dst_lpu, []) \
                   .append((src_index, dst_index))
    return (neurons, populations, projections)
def create_spineml_network(neurons, populations,
    projections,output=False,output_filename='model.xml',project_name= 'drosophila'):
    """ convert projections and populations into a SpineML network

    Parameters:
    --------------------------------------------------------------------
    neurons:         dict of neuron rows as produced by process_files
    populations:     dict mapping LPU name -> list of neuron names
    projections:     dict mapping pre-LPU -> post-LPU -> connection tuples
    output:          if True, also write the network XML to output_filename
    output_filename: destination file used when output is True
    project_name:    name written into the <LL:SpineML> root element

    Returns a dict {'network': {'name', 'xml'}, 'components': [...]}.

    Bug fixed: the original bound a local dict to the name `output`,
    shadowing the `output=False` flag, so the network file was written
    unconditionally. The result dict is now named `result` and the flag
    behaves as documented.
    """
    result = {
        'network' : {
            'name':None,
            'xml' : None
        },
        'components' : []
    }
    # create the network SpineML type
    network = net.SpineMLType()
    # for each population, create a Population type
    for p in populations:
        population = net.PopulationType()
        # create a neuron type
        neuron = net.NeuronType()
        n = neurons.keys()[0]   # The model neuron to use as the template
        # Build this Neuron Sets Property list
        # Currently all fixed value # TODO
        # (loop variable renamed from `np`, which shadowed the numpy alias)
        for prop_name in default_neuron_models['LIF'].keys():
            # WIP: specify based on model
            # Get non-default values
            value = net.FixedValueType(default_neuron_models[neurons[n]['mem_model']][prop_name]) # Currently using a fixed value, should use valuelist
            dimension = '?' #Todo Add dimensions to property list
            neuron_property = net.PropertyType()
            neuron_property.set_name(prop_name)
            neuron_property.set_dimension(dimension)
            neuron_property.set_AbstractValue(value)
            neuron.add_Property(neuron_property)
        neuron.set_name(p)
        component_file_name = neurons[n]['mem_model']+'.xml'
        neuron.set_url(component_file_name)
        result['components'].append(component_file_name)
        neuron.set_size(len(populations[p]))
        # Assign to population
        population.set_Neuron(neuron)
        # create a projection
        if p in projections:
            for cn, destination in enumerate(projections[p]):
                projection = net.ProjectionType(destination)
                # Add synapses
                #For every connection, we will create a new synapse
                for index, connection in enumerate(projections[p][destination]):
                    synapse_file_name = 'CurrExp.xml'
                    # Create a PostSynapse
                    postSynapse = net.PostSynapseType(
                        name='CurrExp',
                        url = synapse_file_name,
                        Property=
                            [
                            net.PropertyType(name='tau_syn', AbstractValue=net.FixedValueType(value=10))
                            ],
                        input_src_port=None,
                        input_dst_port=None,
                        output_src_port=None,
                        output_dst_port=None
                    )
                    result['components'].append(synapse_file_name)
                    ## Create Connectivity
                    connection_list = net.ConnectionListType()
                    connection_type = net.ConnectionType(connection[0],connection[1],0) # zero delay
                    connection_list.add_Connection(connection_type)
                    # NOTE(review): connection[2] assumes a weight element in
                    # each connection tuple; process_files as written stores
                    # only (pre_index, post_index) — confirm the synapse file
                    # supplies a weight column.
                    weightValue = net.ValueType(index=int(index),value=float(connection[2]))
                    update_file_name = 'FixedWeight.xml'
                    # Create a PostSynapse
                    weightUpdate = net.WeightUpdateType(
                        name='"%s to %s Synapse %s weight_update' % (p,destination,cn),
                        url=update_file_name,
                        input_src_port="spike",
                        input_dst_port="spike",
                        feedback_src_port=None,
                        feedback_dst_port=None
                    )
                    result['components'].append(update_file_name)
                    prop = net.PropertyType(name='w',dimension="?")
                    prop.set_AbstractValue(weightValue)
                    weightUpdate.set_Property([prop])
                    # Create Synapse
                    synapse = net.SynapseType(
                        AbstractConnection=connection_list,
                        WeightUpdate=weightUpdate,
                        PostSynapse=postSynapse
                    )
                    projection.add_Synapse(synapse)
                population.add_Projection(projection)
        # add population to the network
        network.add_Population(population)
    # Write out network to xml
    io = cStringIO.StringIO()
    network.export(io,0)
    network = io.getvalue()
    # Cleanup Replace Abstract objects with non_abstract
    subs = {
        "AbstractConnection":"ConnectionList",
        "AbstractValue":"FixedValue",
        "Population":"LL:Population",
        "Neuron":"LL:Neuron",
        "Projection":"LL:Projection",
        "<Synapse>":"<LL:Synapse>", # REQURED DUE TO PostSynapse Overlap
        "</Synapse>":"</LL:Synapse>",
        "<PostSynapse":"<LL:PostSynapse", # REQURED DUE TO PostSynapse Overlap
        "</PostSynapse>":"</LL:PostSynapse>",
        "ConnectionList": "LL:ConnectionList",
        "WeightUpdate":"LL:WeightUpdate",
        '<SpineMLType>':
            '<LL:SpineML xsi:schemaLocation="http://www.shef.ac.uk/SpineMLLowLevelNetworkLayer SpineMLLowLevelNetworkLayer.xsd http://www.shef.ac.uk/SpineMLNetworkLayer SpineMLNetworkLayer.xsd" name="%s">' % project_name,
        '</SpineMLType>':'</LL:SpineML>'
    }
    for k in subs:
        network = network.replace(k,subs[k])
    # Only write the file when explicitly requested (see bug note above).
    if output:
        with open(output_filename, 'w') as f:
            f.write(network)
    # Create Output SpineML JSON reprentation
    result['network']['name'] = 'model.xml'
    result['network']['xml'] = network
    # WIP: Add each component xml too; deduplicate component file names.
    result['components'] = list(set(result['components']))
    return result
def create_graphviz_graph(populations, projections):
    """Render the inter-LPU projection matrix as an SVG digraph.

    Only lower-case LPU names are drawn, and an edge is added only when a
    projection carries more than 100 connections. The result is rendered to
    'left_hemisphere.svg' as a side effect.
    """
    graph = gv.Digraph(format='svg')
    for lpu in populations.keys():
        if lpu.lower() != lpu:
            continue
        graph.node(lpu)
    for source in projections.keys():
        if source.lower() != source:
            continue
        for target in projections[source]:
            if target.lower() != target:
                continue
            n_connections = len(projections[source][target])
            if n_connections > 100:
                graph.edge(source, target, weight=str(n_connections))
    graph.render(filename='left_hemisphere')
def create_networkx_graph(populations, projections, prune=10):
    """Build an undirected NetworkX graph of the inter-LPU projections.

    Every LPU becomes a node; an edge is added only when the projection
    carries more than `prune` connections, weighted by the reciprocal of the
    connection count (more connections => smaller weight).
    """
    graph = nx.Graph()
    for lpu in populations.keys():
        graph.add_node(lpu)
    for source in projections.keys():
        for target in projections[source]:
            count = len(projections[source][target])
            if count > prune:
                graph.add_edge(source, target, weight=1.0/count)
    return graph
|
gpl-3.0
|
arju88nair/projectCulminate
|
venv/lib/python3.5/encodings/shift_jisx0213.py
|
816
|
1059
|
#
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# C-implemented multibyte codec for SHIFT_JISX0213; all classes below
# delegate to this object.
codec = _codecs_jp.getcodec('shift_jisx0213')
class Codec(codecs.Codec):
    # Stateless encode/decode: bind the C codec's methods directly.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte base class reads this attribute to drive encoding.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte base class reads this attribute to drive decoding.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream wrapper; the C codec object drives the actual conversion.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream wrapper; the C codec object drives the actual conversion.
    codec = codec
def getregentry():
    """Return the CodecInfo used to register the shift_jisx0213 codec."""
    stateless = Codec()
    return codecs.CodecInfo(
        name='shift_jisx0213',
        encode=stateless.encode,
        decode=stateless.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
apache-2.0
|
yannickcr/CouchPotatoServer
|
couchpotato/core/media/_base/providers/torrent/thepiratebay.py
|
7
|
6057
|
import re
import traceback
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six
# Module-level logger named after this module's import path.
log = CPLog(__name__)
class Base(TorrentMagnetProvider):
    """ThePirateBay magnet provider: scrapes search-result HTML pages.

    `urls['search']` is first filled with the proxy domain and later with
    (query, page, category) by buildUrl.
    """
    urls = {
        'detail': '%s/torrent/%s',
        'search': '%s/search/%%s/%%s/7/%%s'
    }
    # Category id used when quality lookup yields nothing.
    cat_backup_id = 200
    disable_provider = False
    # No throttling between HTTP calls.
    http_time_between_calls = 0
    # Mirror/proxy domains tried in order when no explicit domain is set.
    proxy_list = [
        'https://nobay.net',
        'https://thebay.al',
        'https://thepiratebay.se',
        'http://thepiratebay.cd',
        'http://thebootlegbay.com',
        'http://www.tpb.gr',
        'http://tpbproxy.co.uk',
        'http://pirateproxy.in',
        'http://www.getpirate.com',
        'http://piratebay.io',
        'http://bayproxy.li',
        'http://proxybay.pw',
    ]
    def _search(self, media, quality, results):
        """Scrape paged search results for `media` into `results`.

        Each hit becomes a dict with id/name/url/size/seeders/leechers plus
        an extra_score callable that rewards trusted/VIP/moderated uploads.
        """
        page = 0
        total_pages = 1
        cats = self.getCatId(quality)
        base_search_url = self.urls['search'] % self.getDomain()
        while page < total_pages:
            search_url = base_search_url % self.buildUrl(media, page, cats)
            page += 1
            data = self.getHTMLData(search_url)
            if data:
                try:
                    soup = BeautifulSoup(data)
                    results_table = soup.find('table', attrs = {'id': 'searchResult'})
                    if not results_table:
                        return
                    # Pagination links live in the centered div; best-effort.
                    try:
                        total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a'))
                    except:
                        pass
                    entries = results_table.find_all('tr')
                    # Skip the header row.
                    for result in entries[1:]:
                        link = result.find(href = re.compile('torrent\/\d+\/'))
                        download = result.find(href = re.compile('magnet:'))
                        # Size lives in the description cell; rows without a
                        # parsable size are skipped (deliberate best-effort).
                        try:
                            size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size')
                        except:
                            continue
                        if link and download:
                            # Closure over `result`: scored lazily per item.
                            def extra_score(item):
                                trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None]
                                vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None]
                                confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None]
                                moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None]
                                return confirmed + trusted + vip + moderated
                            results.append({
                                'id': re.search('/(?P<id>\d+)/', link['href']).group('id'),
                                'name': six.text_type(link.string),
                                'url': download['href'],
                                'detail_url': self.getDomain(link['href']),
                                'size': self.parseSize(size),
                                'seeders': tryInt(result.find_all('td')[2].string),
                                'leechers': tryInt(result.find_all('td')[3].string),
                                'extra_score': extra_score,
                                'get_more_info': self.getMoreInfo
                            })
                except:
                    log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
    def isEnabled(self):
        """Enabled only when the base checks pass AND a domain is reachable."""
        return super(Base, self).isEnabled() and self.getDomain()
    def correctProxy(self, data):
        """Heuristic: a genuine TPB page contains the search-box title."""
        return 'title="Pirate Search"' in data
    def getMoreInfo(self, item):
        """Fetch and attach the long description (cached ~300 days)."""
        full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        html = BeautifulSoup(full_description)
        nfo_pre = html.find('div', attrs = {'class': 'nfo'})
        description = ''
        try:
            description = toUnicode(nfo_pre.text)
        except:
            # nfo block missing: leave description empty (best-effort).
            pass
        item['description'] = description
        return item
# CouchPotato settings schema for this provider: one 'searcher' tab group
# with the enable toggle, proxy domain, seeding rules and score offset.
config = [{
    'name': 'thepiratebay',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'ThePirateBay',
            'description': 'The world\'s largest bittorrent tracker. See <a href="http://fucktimkuik.org/">ThePirateBay</a>',
            'wizard': True,
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests, keep empty to let CouchPotato pick.',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
|
gpl-3.0
|
yaukwankiu/twstocks
|
mark1.py
|
1
|
9719
|
# -*- coding: utf8 -*-
############################
# imports
import time
import datetime
import urllib2
import re
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
############################
# defining the parameters
# Extracts the current price from Yahoo's quote-page HTML (lookbehind on the
# table-cell markup, lookahead on the closing tags).
currentPriceRegex = re.compile(r'(?<=\<td\ align\=\"center\"\ bgcolor\=\"\#FFFfff\"\ nowrap\>\<b\>)\d*\.\d*(?=\<\/b\>\<\/td\>)')
#companyNameRegex = re.compile( ur'(?<=\<TITLE\>).+(?=-公司資料-奇摩股市\<\/TITLE\>)',re.UNICODE) #doesn't work somehow
# Fallback: capture the whole <TITLE> element; stock.__init__ slices the
# company name out of it afterwards.
companyNameRegex = re.compile( ur'\<TITLE.+TITLE\>', re.UNICODE)
# In-memory cache of known 4-digit stock symbols.
stockSymbolsList = []
# Output locations. NOTE(review): hard-coded Windows path — confirm.
outputFolder = "c:/chen chen/stocks/"
stockSymbolsFile='stockSymbols.pydump'
pricesFolder = outputFolder+ "prices/"
stocksFolder = outputFolder +"stocks/"
############################
#
############################
# defining the classes
class stock:
def __init__(self, symbol):
"""e.g.
https://tw.stock.yahoo.com/d/s/company_1473.html
"""
symbol= ('000'+str(symbol))[-4:]
self.symbol = symbol
self.yahooFrontPageUrl = 'https://tw.stock.yahoo.com/d/s/company_' + symbol + '.html'
self.yahooCurrentPageUrl = 'https://tw.stock.yahoo.com/q/q?s=' + symbol
# get some basic information from the front page
yahooFrontPage = urllib2.urlopen(self.yahooFrontPageUrl)
raw_text = yahooFrontPage.read()
self.name = companyNameRegex.findall(raw_text)[0]
self.name = self.name[7:-26]
self.pricesList = []
def __call__(self):
outputString = ""
#outputString += self.symbol + '\n' #unnecessary
outputString += self.name + '\n'
outputString += self.yahooCurrentPageUrl + '\n'
if self.pricesList != []:
outputString += '\n'.join([time.asctime(time.localtime((v['pingTime'])))+ ": $" + str(v['price']) for v in self.pricesList])
print outputString
def openYahooCurrentPage(self):
self.yahooCurrentPage = urllib2.urlopen(self.yahooCurrentPageUrl)
def getCurrentPrice(self, verbose=True, showResponseTime=True):
self.openYahooCurrentPage()
t0 = time.time()
raw_text = self.yahooCurrentPage.read()
t1 = time.time()
self.yahooCurrentPage.close()
currentPrice = currentPriceRegex.findall(raw_text)[0]
self.currentPricePingTime = t0
self.currentPricePingReturnTime = t1
self.currentPrice = currentPrice
if verbose:
print "Time: ", time.asctime(time.localtime(t0)),
if showResponseTime:
print "(response time: ", t1-t0, ")",
#print self.symbol, #unnecessary
print self.name, "Price:", currentPrice
self.pricesList.append({'price' : currentPrice,
'pingTime' : t0,
'responseTime' : t1-t0,
})
return currentPrice, t0, t1-t0
def getPriceList(self, throttle=1, repetitions=-999, verbose=True):
count = 0
while count!= repetitions:
count +=1
p, t0, dt = self.getCurrentPrice(verbose=verbose)
self.pricesList.append({'price' : p,
'pingTime' : t0,
'responseTime' : dt,
})
if throttle>0:
time.sleep(throttle)
def writeCurrentPrice(self, verbose=True):
P = self.pricesList[-1] # the last one
currentPrice = P['price']
t0 = P['pingTime']
dt = P['responseTime']
outputString= ''
if not os.path.exists(pricesFolder+self.name+'.dat'):
outputString = "#time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+self.name+'.dat','a').write(outputString)
if verbose:
print self.name, outputString
def loadPrices(self, pricesPath="", eraseOld=True):
if eraseOld:
self.pricesList = []
if pricesPath == "":
pricesPath = pricesFolder + self.name + ".dat"
if not os.path.exists(pricesPath):
return 0
raw_text = open(pricesPath, 'r').read()
x = raw_text.split('\n')[1:]
xx = [v.split(',') for v in x]
for u in xx:
print u
if len(u) ==2:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0] ),
'responseTime': 0
})
elif len(u) ==3:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0]) ,
'responseTime': float(u[2])
})
def load(self, *args, **kwargs):
self.loadPrices(*args, **kwargs)
def plot(self, display=True):
y = [v['price'] for v in self.pricesList]
x = [v['pingTime'] for v in self.pricesList]
plt.plot(x,y)
plt.title(self.symbol)
if display:
plt.show()
############################
# defining the functions
def getStockSymbolsList1():
for N in range(9999):
try:
s = stock(N)
stockSymbolsList.append(N)
print N, s.name, "<-------------added"
except:
print N, "doesn't exist!"
return stocksSymbolsList
def getStockSymbolsList2(url="http://sheet1688.blogspot.tw/2008/11/blog-post_18.html"):
raw_text = urllib2.urlopen(url).read()
symbols = re.findall(ur'(?<=num\>)\d\d\d\d(?=\<\/td\>)', raw_text, re.UNICODE)
symbols.sort()
pickle.dump(symbols, open(outputFolder+stockSymbolsFile,'w'))
stockSymbolsList = symbols
return symbols
def loadStockSymbolsList(path=outputFolder+stockSymbolsFile):
    """Load and return the pickled list of stock symbols from `path`.

    Fixed: the file handle is now closed via `with` (it used to leak).
    """
    with open(path, 'r') as f:
        return pickle.load(f)
def makeStocksList(inPath=outputFolder+stockSymbolsFile,
outputFolder=stocksFolder):
symbols = loadStockSymbolsList()
for N in symbols:
try:
st = stock(N)
pickle.dump(st, open(outputFolder+st.name+'.pydump','w'))
print st.name, "-->", outputFolder+st.name+'.pydump'
except:
print "stock symbol", N, "not found!!!!"
def loadStocksList(inputFolder=stocksFolder):
    """Unpickle every stock dump found in inputFolder.

    Files are ordered by the fixed slice [-13:-7] of the file name
    (assumed to select the symbol portion of '<name>.pydump' -- TODO confirm).

    :return: list of stock objects.
    """
    stocksList = []
    fileNames = os.listdir(inputFolder)
    fileNames.sort(key=lambda v: v[-13:-7])
    for fileName in fileNames:
        # FIX: close each pickle file deterministically.
        with open(inputFolder + fileName, 'r') as f:
            stocksList.append(pickle.load(f))
    return stocksList
############################
# test run
def main0():
    """Ping every stock once, then fetch a short burst of price samples."""
    for security in stocksList:
        security()
        security.getPriceList(repetitions=5, throttle=0.3)
def main1(throttle=0.5):
for st in stocksList:
st.load()
st()
print "=================="
while True:
time0 = time.time()
if time.time() - time0 > 600:
for st in stocksList:
st()
try:
st.writeCurrentPrice()
except:
print "writeCurrentPrice() -- error!"
time0 = time.time()
for st in stocksList:
st.getCurrentPrice()
time.sleep(throttle)
def main2():
print "=================="
print time.asctime(time.localtime(time.time()))
#symbols = loadStockSymbolsList()
while True:
stocks = loadStocksList() #clean up every day
while time.localtime(time.time()).tm_wday > 4: #weekends
pass
while time.localtime(time.time()).tm_hour<9:
pass
while (time.localtime(time.time()).tm_hour >=9 and \
time.localtime(time.time()).tm_hour < 13) or \
(time.localtime(time.time()).tm_hour==13 and time.localtime(time.time()).tm_min<=30):
for st in stocks:
try:
currentPrice, t0, dt = st.getCurrentPrice()
if not os.path.exists(pricesFolder+st.name+'.dat'):
outputString = "time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+st.name+'.dat','a').write(outputString)
time.sleep(.5)
except:
print "ERROR!! <------ ", st.name
T = time.localtime()
print time.asctime(T)
#if T.tm_hour < 9 or T.tm_hour>=13 and T.tm_min>=30:
# time.sleep(86400 - (13-9)*3600 - 30*60)
print "End of the trading session of the day!"
def main(*args, **kwargs):
    """Entry point: delegate all arguments to main1()."""
    main1(*args, **kwargs)
if __name__=="__main__":
    ############################
    # constructing examples
    # A hand-picked sample of stock symbols to monitor.  NOTE(review):
    # symbols are passed inconsistently as str and int -- presumably stock()
    # normalises them; verify in the constructor.
    tainam = stock(symbol='1473')
    chenpinsen = stock(symbol=2926)
    ganung = stock(symbol=2374)
    tungyang = stock(symbol=1319)
    htc = stock(2498)
    prince = stock(2511)
    stocksList = [tainam, chenpinsen, ganung, tungyang, htc, prince]
    ##############################
    # test run
    # 60 is forwarded through main() to main1() as the throttle (seconds
    # between successive price requests).
    main(60)
|
cc0-1.0
|
pplatek/odoo
|
addons/account/wizard/account_journal_select.py
|
385
|
2068
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
    """
    Account Journal Select
    """
    _name = "account.journal.select"
    _description = "Account Journal Select"

    def action_open_window(self, cr, uid, ids, context=None):
        """Return the move-line list action, filtered to the journal and
        period of the active account_journal_period record."""
        if context is None:
            context = {}
        data_model = self.pool.get('ir.model.data')
        action_model = self.pool.get('ir.actions.act_window')
        ref = data_model.get_object_reference(cr, uid, 'account', 'action_move_line_select')
        action_id = ref and ref[1] or False
        result = action_model.read(cr, uid, [action_id])[0]
        # Look up which journal/period the selected journal-period refers to.
        cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
        row = cr.fetchone()
        if row:
            journal_id, period_id = row
            result['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
            result['context'] = str({'journal_id': journal_id, 'period_id': period_id})
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
cyrusin/tornado
|
tornado/test/runtests.py
|
1
|
7199
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import gc
import locale # system locale module, not tornado.locale
import logging
import operator
import textwrap
import sys
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver
from tornado.options import define, options, add_parse_callback
from tornado.test.util import unittest
# py2/py3 compatibility: reduce() moved to functools in Python 3.
try:
    reduce  # py2
except NameError:
    from functools import reduce  # py3

# Every test module run by this script, consumed by all() below via
# unittest's defaultTestLoader.
TEST_MODULES = [
    'tornado.httputil.doctests',
    'tornado.iostream.doctests',
    'tornado.util.doctests',
    'tornado.test.asyncio_test',
    'tornado.test.auth_test',
    'tornado.test.concurrent_test',
    'tornado.test.curl_httpclient_test',
    'tornado.test.escape_test',
    'tornado.test.gen_test',
    'tornado.test.http1connection_test',
    'tornado.test.httpclient_test',
    'tornado.test.httpserver_test',
    'tornado.test.httputil_test',
    'tornado.test.import_test',
    'tornado.test.ioloop_test',
    'tornado.test.iostream_test',
    'tornado.test.locale_test',
    'tornado.test.locks_test',
    'tornado.test.netutil_test',
    'tornado.test.log_test',
    'tornado.test.options_test',
    'tornado.test.process_test',
    'tornado.test.queues_test',
    'tornado.test.simple_httpclient_test',
    'tornado.test.stack_context_test',
    'tornado.test.tcpclient_test',
    'tornado.test.tcpserver_test',
    'tornado.test.template_test',
    'tornado.test.testing_test',
    'tornado.test.twisted_test',
    'tornado.test.util_test',
    'tornado.test.web_test',
    'tornado.test.websocket_test',
    'tornado.test.windows_test',
    'tornado.test.wsgi_test',
]
def all():
    """Return a test suite covering every module in TEST_MODULES.

    NOTE(review): intentionally shadows the builtin all() within this
    module; presumably referenced by name from the command line as
    tornado.test.runtests.all -- verify before renaming.
    """
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)
class TornadoTextTestRunner(unittest.TextTestRunner):
def run(self, test):
result = super(TornadoTextTestRunner, self).run(test)
if result.skipped:
skip_reasons = set(reason for (test, reason) in result.skipped)
self.stream.write(textwrap.fill(
"Some tests were skipped because: %s" %
", ".join(sorted(skip_reasons))))
self.stream.write("\n")
return result
class LogCounter(logging.Filter):
    """Counts the number of WARNING or higher log records."""

    def __init__(self, *args, **kwargs):
        # Can't use super() because logging.Filter is an old-style class in py26
        logging.Filter.__init__(self, *args, **kwargs)
        self.warning_count = 0
        self.error_count = 0

    def filter(self, record):
        """Tally the record by severity; always let it through."""
        level = record.levelno
        if level >= logging.ERROR:
            self.error_count += 1
        elif level >= logging.WARNING:
            self.warning_count += 1
        return True
def main():
    """Configure warnings, logging and implementation options, then run the
    tornado test suite via tornado.testing.main().

    Exits with status 1 if any test logged a warning or error, even when all
    tests passed.
    """
    # The -W command-line option does not work in a virtualenv with
    # python 3 (as of virtualenv 1.7), so configure warnings
    # programmatically instead.
    import warnings
    # Be strict about most warnings. This also turns on warnings that are
    # ignored by default, including DeprecationWarnings and
    # python 3.2's ResourceWarnings.
    warnings.filterwarnings("error")
    # setuptools sometimes gives ImportWarnings about things that are on
    # sys.path even if they're not being used.
    warnings.filterwarnings("ignore", category=ImportWarning)
    # Tornado generally shouldn't use anything deprecated, but some of
    # our dependencies do (last match wins).
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    warnings.filterwarnings("error", category=DeprecationWarning,
                            module=r"tornado\..*")
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
    warnings.filterwarnings("error", category=PendingDeprecationWarning,
                            module=r"tornado\..*")
    # The unittest module is aggressive about deprecating redundant methods,
    # leaving some without non-deprecated spellings that work on both
    # 2.7 and 3.2
    warnings.filterwarnings("ignore", category=DeprecationWarning,
                            message="Please use assert.* instead")
    # unittest2 0.6 on py26 reports these as PendingDeprecationWarnings
    # instead of DeprecationWarnings.
    warnings.filterwarnings("ignore", category=PendingDeprecationWarning,
                            message="Please use assert.* instead")
    # Twisted 15.0.0 triggers some warnings on py3 with -bb.
    warnings.filterwarnings("ignore", category=BytesWarning,
                            module=r"twisted\..*")

    # Access logs would drown out test output; only show critical ones.
    logging.getLogger("tornado.access").setLevel(logging.CRITICAL)

    # Command-line switches selecting alternate implementations
    # (curl client, monotonic ioloop, threaded resolver, ...).
    define('httpclient', type=str, default=None,
           callback=lambda s: AsyncHTTPClient.configure(
               s, defaults=dict(allow_ipv6=False)))
    define('httpserver', type=str, default=None,
           callback=HTTPServer.configure)
    define('ioloop', type=str, default=None)
    define('ioloop_time_monotonic', default=False)
    define('resolver', type=str, default=None,
           callback=Resolver.configure)
    define('debug_gc', type=str, multiple=True,
           help="A comma-separated list of gc module debug constants, "
           "e.g. DEBUG_STATS or DEBUG_COLLECTABLE,DEBUG_OBJECTS",
           callback=lambda values: gc.set_debug(
               reduce(operator.or_, (getattr(gc, v) for v in values))))
    define('locale', type=str, default=None,
           callback=lambda x: locale.setlocale(locale.LC_ALL, x))

    def configure_ioloop():
        # Deferred until after option parsing: may need the monotonic clock.
        kwargs = {}
        if options.ioloop_time_monotonic:
            from tornado.platform.auto import monotonic_time
            if monotonic_time is None:
                raise RuntimeError("monotonic clock not found")
            kwargs['time_func'] = monotonic_time
        if options.ioloop or kwargs:
            IOLoop.configure(options.ioloop, **kwargs)
    add_parse_callback(configure_ioloop)

    # Count warnings/errors logged during the run (checked in the finally
    # block below).
    log_counter = LogCounter()
    add_parse_callback(
        lambda: logging.getLogger().handlers[0].addFilter(log_counter))

    import tornado.testing
    kwargs = {}
    if sys.version_info >= (3, 2):
        # HACK: unittest.main will make its own changes to the warning
        # configuration, which may conflict with the settings above
        # or command-line flags like -bb. Passing warnings=False
        # suppresses this behavior, although this looks like an implementation
        # detail. http://bugs.python.org/issue15626
        kwargs['warnings'] = False
    kwargs['testRunner'] = TornadoTextTestRunner
    try:
        tornado.testing.main(**kwargs)
    finally:
        # The tests should run clean; consider it a failure if they logged
        # any warnings or errors. We'd like to ban info logs too, but
        # we can't count them cleanly due to interactions with LogTrapTestCase.
        if log_counter.warning_count > 0 or log_counter.error_count > 0:
            logging.error("logged %d warnings and %d errors",
                          log_counter.warning_count, log_counter.error_count)
            sys.exit(1)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
apache-2.0
|
Bunshinn/myPython
|
智联职位爬取/jobs_of_data_analysis_on_zhilian.py
|
1
|
2488
|
# -*- coding: utf-8 -*-
"""
@author: QQ:412319433
"""
import requests
from bs4 import BeautifulSoup
import sqlite3
import time
s = time.clock()
def htmlparse(bs, tag, attr=None, attrs=None):
    """Find all ``tag`` elements in ``bs`` and project each to a value.

    :param bs: BeautifulSoup object (or tag) to search.
    :param tag: tag name to collect.
    :param attr: attribute name whose value is extracted (only honoured
        when ``attrs`` is not given, matching the original behaviour).
    :param attrs: attribute filter forwarded to find_all().
    :return: list of attribute values, or of element texts.
    """
    elements = bs.find_all(tag, attrs=attrs)
    if attr is not None and attrs is None:
        return [element[attr] for element in elements]
    return [element.get_text() for element in elements]
def getcontents(url, data):
    """Fetch one page of search results and scrape the job rows from it.

    :param url: search endpoint.
    :param data: query parameters (keyword, page index, ...).
    :return: list of (jobname, companyname, education, salary, url) tuples;
        empty when the page could not be fetched or parsed.
    """
    # FIX: bind joblist before the try block so the return below can never
    # raise UnboundLocalError.
    joblist = []
    try:
        r = requests.get(url, data)
        bs = BeautifulSoup(r.content.decode())
        bs = bs.find('div', attrs={'class': 'r_searchlist positiolist'})
        jobname = htmlparse(bs, 'div', attrs={'class': 'jobname'})
        companyname = htmlparse(bs, 'div', attrs={'class': 'companyname'})
        education = htmlparse(bs, 'span', attrs={'class': 'education'})
        salary = htmlparse(bs, 'div', attrs={'class': 'salary'})
        url = htmlparse(bs, 'a', 'href')
        joblist.extend(list(zip(jobname, companyname, education, salary, url)))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate; a failed page is logged and yields an empty list.
        print(url)
    return joblist
# --- scrape all result pages into a fresh SQLite database -------------------
conn = sqlite3.connect('d:/jobs.db')
c = conn.cursor()
c.execute('''
create table jobs(id INTEGER PRIMARY KEY AUTOINCREMENT,
        jobname nvarchar(50),
        companyname nvarchar(100),
        education nvarchar(100),
        salary nvarchar(50),
        url nvarchar(50)
        );
''')
sql = 'insert into jobs (jobname,companyname,education,salary,url) values(?, ?, ?, ?, ?)'

outlist = []
url = 'http://m.zhaopin.com/shenzhen-765/?'
data = {
    'keyword': '数据分析',
    'maprange': 3,
    'islocation': 0,
    'pageindex': 1
}
pageNum = 455
for i in range(pageNum):
    data['pageindex'] = i + 1
    outlist.extend(getcontents(url, data))
    # Flush to the database every 20 pages and after the final page.
    # BUG FIX: the original never cleared outlist between flushes, so every
    # commit re-inserted all previously written rows; and its `i == pageNum`
    # test could never be true (range stops at pageNum - 1), losing the
    # final partial batch.
    if i % 20 == 0 or i == pageNum - 1:
        c.executemany(sql, outlist)
        conn.commit()
        outlist = []
c.close()
conn.close()
e = time.clock()
print('use: %f s' % (e - s))
|
gpl-3.0
|
popbones/jieba
|
test/parallel/test_pos.py
|
65
|
4862
|
#encoding=utf-8
from __future__ import print_function
import sys
sys.path.append("../../")
import jieba
jieba.enable_parallel(4)
import jieba.posseg as pseg
def cuttest(test_sent):
    """Segment one sentence with POS tagging and print 'word / flag' pairs."""
    for token in pseg.cut(test_sent):
        print(token.word, "/", token.flag, ", ", end=' ')
    print("")
if __name__ == "__main__":
    # Regression sentences exercising tricky segmentation cases: ambiguous
    # word boundaries, person/place names, numbers, mixed CJK/ASCII text,
    # and empty input.  Order and content match the original call list.
    test_sentences = (
        "这是一个伸手不见五指的黑夜。我叫孙悟空,我爱北京,我爱Python和C++。",
        "我不喜欢日本和服。",
        "雷猴回归人间。",
        "工信处女干事每月经过下属科室都要亲口交代24口交换机等技术性器件的安装工作",
        "我需要廉租房",
        "永和服装饰品有限公司",
        "我爱北京天安门",
        "abc",
        "隐马尔可夫",
        "雷猴是个好网站",
        "“Microsoft”一词由“MICROcomputer(微型计算机)”和“SOFTware(软件)”两部分组成",
        "草泥马和欺实马是今年的流行词汇",
        "伊藤洋华堂总府店",
        "中国科学院计算技术研究所",
        "罗密欧与朱丽叶",
        "我购买了道具和服装",
        "PS: 我觉得开源有一个好处,就是能够敦促自己不断改进,避免敞帚自珍",
        "湖北省石首市",
        "湖北省十堰市",
        "总经理完成了这件事情",
        "电脑修好了",
        "做好了这件事情就一了百了了",
        "人们审美的观点是不同的",
        "我们买了一个美的空调",
        "线程初始化时我们要注意",
        "一个分子是由好多原子组织成的",
        "祝你马到功成",
        "他掉进了无底洞里",
        "中国的首都是北京",
        "孙君意",
        "外交部发言人马朝旭",
        "领导人会议和第四届东亚峰会",
        "在过去的这五年",
        "还需要很长的路要走",
        "60周年首都阅兵",
        "你好人们审美的观点是不同的",
        "买水果然后来世博园",
        "买水果然后去世博园",
        "但是后来我才知道你是对的",
        "存在即合理",
        "的的的的的在的的的的就以和和和",
        "I love你,不以为耻,反以为rong",
        "因",
        "",
        "hello你好人们审美的观点是不同的",
        "很好但主要是基于网页形式",
        "hello你好人们审美的观点是不同的",
        "为什么我不能拥有想要的生活",
        "后来我才",
        "此次来中国是为了",
        "使用了它就可以解决一些问题",
        ",使用了它就可以解决一些问题",
        "其实使用了它就可以解决一些问题",
        "好人使用了它就可以解决一些问题",
        "是因为和国家",
        "老年搜索还支持",
        "干脆就把那部蒙人的闲法给废了拉倒!RT @laoshipukong : 27日,全国人大常委会第三次审议侵权责任法草案,删除了有关医疗损害责任“举证倒置”的规定。在医患纠纷中本已处于弱势地位的消费者由此将陷入万劫不复的境地。 ",
        "大",
        "",
        "他说的确实在理",
        "长春市长春节讲话",
        "结婚的和尚未结婚的",
        "结合成分子时",
        "旅游和服务是最好的",
        "这件事情的确是我的错",
        "供大家参考指正",
        "哈尔滨政府公布塌桥原因",
        "我在机场入口处",
        "邢永臣摄影报道",
        "BP神经网络如何训练才能在分类时增加区分度?",
        "南京市长江大桥",
        "应一些使用者的建议,也为了便于利用NiuTrans用于SMT研究",
        "长春市长春药店",
        "邓颖超生前最喜欢的衣服",
        "胡锦涛是热爱世界和平的政治局常委",
        "程序员祝海林和朱会震是在孙健的左面和右面, 范凯在最右面.再往左是李松洪",
        "一次性交多少钱",
        "两块五一套,三块八一斤,四块七一本,五块六一条",
        "小和尚留了一个像大和尚一样的和尚头",
        "我是中华人民共和国公民;我爸爸是共和党党员; 地铁和平门站",
        "张晓梅去人民医院做了个B超然后去买了件T恤",
        "AT&T是一件不错的公司,给你发offer了吗?",
        "C++和c#是什么关系?11+122=133,是吗?PI=3.14159",
        "你认识那个和主席握手的的哥吗?他开一辆黑色的士。",
    )
    for sentence in test_sentences:
        cuttest(sentence)
|
mit
|
ryfeus/lambda-packs
|
Tensorflow/source/tensorflow/core/framework/types_pb2.py
|
2
|
8950
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/types.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/types.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n%tensorflow/core/framework/types.proto\x12\ntensorflow*\xe6\x05\n\x08\x44\x61taType\x12\x0e\n\nDT_INVALID\x10\x00\x12\x0c\n\x08\x44T_FLOAT\x10\x01\x12\r\n\tDT_DOUBLE\x10\x02\x12\x0c\n\x08\x44T_INT32\x10\x03\x12\x0c\n\x08\x44T_UINT8\x10\x04\x12\x0c\n\x08\x44T_INT16\x10\x05\x12\x0b\n\x07\x44T_INT8\x10\x06\x12\r\n\tDT_STRING\x10\x07\x12\x10\n\x0c\x44T_COMPLEX64\x10\x08\x12\x0c\n\x08\x44T_INT64\x10\t\x12\x0b\n\x07\x44T_BOOL\x10\n\x12\x0c\n\x08\x44T_QINT8\x10\x0b\x12\r\n\tDT_QUINT8\x10\x0c\x12\r\n\tDT_QINT32\x10\r\x12\x0f\n\x0b\x44T_BFLOAT16\x10\x0e\x12\r\n\tDT_QINT16\x10\x0f\x12\x0e\n\nDT_QUINT16\x10\x10\x12\r\n\tDT_UINT16\x10\x11\x12\x11\n\rDT_COMPLEX128\x10\x12\x12\x0b\n\x07\x44T_HALF\x10\x13\x12\x0f\n\x0b\x44T_RESOURCE\x10\x14\x12\x0e\n\nDT_VARIANT\x10\x15\x12\x10\n\x0c\x44T_FLOAT_REF\x10\x65\x12\x11\n\rDT_DOUBLE_REF\x10\x66\x12\x10\n\x0c\x44T_INT32_REF\x10g\x12\x10\n\x0c\x44T_UINT8_REF\x10h\x12\x10\n\x0c\x44T_INT16_REF\x10i\x12\x0f\n\x0b\x44T_INT8_REF\x10j\x12\x11\n\rDT_STRING_REF\x10k\x12\x14\n\x10\x44T_COMPLEX64_REF\x10l\x12\x10\n\x0c\x44T_INT64_REF\x10m\x12\x0f\n\x0b\x44T_BOOL_REF\x10n\x12\x10\n\x0c\x44T_QINT8_REF\x10o\x12\x11\n\rDT_QUINT8_REF\x10p\x12\x11\n\rDT_QINT32_REF\x10q\x12\x13\n\x0f\x44T_BFLOAT16_REF\x10r\x12\x11\n\rDT_QINT16_REF\x10s\x12\x12\n\x0e\x44T_QUINT16_REF\x10t\x12\x11\n\rDT_UINT16_REF\x10u\x12\x15\n\x11\x44T_COMPLEX128_REF\x10v\x12\x0f\n\x0b\x44T_HALF_REF\x10w\x12\x13\n\x0f\x44T_RESOURCE_REF\x10x\x12\x12\n\x0e\x44T_VARIANT_REF\x10yB,\n\x18org.tensorflow.frameworkB\x0bTypesProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
_DATATYPE = _descriptor.EnumDescriptor(
name='DataType',
full_name='tensorflow.DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DT_INVALID', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_FLOAT_REF', index=22, number=101,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_DOUBLE_REF', index=23, number=102,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT32_REF', index=24, number=103,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT8_REF', index=25, number=104,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT16_REF', index=26, number=105,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT8_REF', index=27, number=106,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_STRING_REF', index=28, number=107,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX64_REF', index=29, number=108,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_INT64_REF', index=30, number=109,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BOOL_REF', index=31, number=110,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT8_REF', index=32, number=111,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT8_REF', index=33, number=112,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT32_REF', index=34, number=113,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_BFLOAT16_REF', index=35, number=114,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QINT16_REF', index=36, number=115,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_QUINT16_REF', index=37, number=116,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_UINT16_REF', index=38, number=117,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_COMPLEX128_REF', index=39, number=118,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_HALF_REF', index=40, number=119,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_RESOURCE_REF', index=41, number=120,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DT_VARIANT_REF', index=42, number=121,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=54,
serialized_end=796,
)
_sym_db.RegisterEnumDescriptor(_DATATYPE)
DataType = enum_type_wrapper.EnumTypeWrapper(_DATATYPE)
DT_INVALID = 0
DT_FLOAT = 1
DT_DOUBLE = 2
DT_INT32 = 3
DT_UINT8 = 4
DT_INT16 = 5
DT_INT8 = 6
DT_STRING = 7
DT_COMPLEX64 = 8
DT_INT64 = 9
DT_BOOL = 10
DT_QINT8 = 11
DT_QUINT8 = 12
DT_QINT32 = 13
DT_BFLOAT16 = 14
DT_QINT16 = 15
DT_QUINT16 = 16
DT_UINT16 = 17
DT_COMPLEX128 = 18
DT_HALF = 19
DT_RESOURCE = 20
DT_VARIANT = 21
DT_FLOAT_REF = 101
DT_DOUBLE_REF = 102
DT_INT32_REF = 103
DT_UINT8_REF = 104
DT_INT16_REF = 105
DT_INT8_REF = 106
DT_STRING_REF = 107
DT_COMPLEX64_REF = 108
DT_INT64_REF = 109
DT_BOOL_REF = 110
DT_QINT8_REF = 111
DT_QUINT8_REF = 112
DT_QINT32_REF = 113
DT_BFLOAT16_REF = 114
DT_QINT16_REF = 115
DT_QUINT16_REF = 116
DT_UINT16_REF = 117
DT_COMPLEX128_REF = 118
DT_HALF_REF = 119
DT_RESOURCE_REF = 120
DT_VARIANT_REF = 121
DESCRIPTOR.enum_types_by_name['DataType'] = _DATATYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\013TypesProtosP\001\370\001\001'))
# @@protoc_insertion_point(module_scope)
|
mit
|
Z2PackDev/bandstructure_utils
|
bands_inspect/eigenvals.py
|
2
|
3489
|
# -*- coding: utf-8 -*-
# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""
Defines the data container for eigenvalue data (bandstructures).
"""
import types
import numpy as np
from fsc.export import export
from fsc.hdf5_io import HDF5Enabled, subscribe_hdf5
from .kpoints import KpointsExplicit, KpointsBase
from .io import from_hdf5
@export
@subscribe_hdf5(
    'bands_inspect.eigenvals_data', extra_tags=('eigenvals_data', )
)
class EigenvalsData(HDF5Enabled, types.SimpleNamespace):
    """
    Container for eigenvalue (bandstructure) data on a set of k-points.
    The eigenvalues at each k-point are sorted ascending on construction.

    :param kpoints: k-points at which the eigenvalues are given.
    :type kpoints: list
    :param eigenvals: Eigenvalues at each k-point; the outer axis runs over
        k-points, the inner axis over the eigenvalues at that k-point.
    :type eigenvals: 2D array
    """

    def __init__(self, *, kpoints, eigenvals):
        if not isinstance(kpoints, KpointsBase):
            kpoints = KpointsExplicit(kpoints)
        sorted_eigenvals = np.sort(eigenvals)
        num_kpoints = len(kpoints.kpoints_explicit)
        if num_kpoints != len(sorted_eigenvals):
            raise ValueError(
                "Number of kpoints ({}) does not match the number of eigenvalue lists ({})"
                .format(num_kpoints, len(sorted_eigenvals))
            )
        self.kpoints = kpoints
        self.eigenvals = sorted_eigenvals

    def slice_bands(self, band_idx):
        """
        Create a new instance containing only the selected bands.

        :param band_idx: Indices of the bands to keep.
        :type band_idx: list
        """
        selected = self.eigenvals.T[sorted(band_idx)].T
        return type(self)(kpoints=self.kpoints, eigenvals=selected)

    @classmethod
    def from_eigenval_function(
        cls, *, kpoints, eigenval_function, listable=False
    ):
        """
        Construct an instance by evaluating ``eigenval_function``.

        :param kpoints: k-points at which the eigenvalues are computed.
        :type kpoints: KpointsBase
        :param eigenval_function: Callable producing the eigenvalues.
        :param listable: When True the function is called once with the full
            k-point list; otherwise it is called once per k-point.
        :type listable: bool
        """
        if listable:
            computed = eigenval_function(kpoints.kpoints_explicit)
        else:
            computed = [
                eigenval_function(k) for k in kpoints.kpoints_explicit
            ]
        return cls(kpoints=kpoints, eigenvals=computed)

    def to_hdf5(self, hdf5_handle):
        """Serialize k-points and eigenvalues into an open HDF5 group."""
        kpoints_group = hdf5_handle.create_group('kpoints_obj')
        self.kpoints.to_hdf5(kpoints_group)
        hdf5_handle['eigenvals'] = self.eigenvals

    @classmethod
    def from_hdf5(cls, hdf5_handle):
        """Deserialize an instance from an open HDF5 group."""
        return cls(
            kpoints=from_hdf5(hdf5_handle['kpoints_obj']),
            eigenvals=hdf5_handle['eigenvals'][()],
        )

    def shift(self, value):
        """
        Create a new instance with every eigenvalue shifted by ``value``.

        :param value: Offset added to each eigenvalue.
        :type value: float
        """
        return type(self)(
            kpoints=self.kpoints, eigenvals=self.eigenvals + value
        )
|
gpl-3.0
|
windyuuy/opera
|
chromium/src/tools/gyp/test/copies/gyptest-all.py
|
264
|
1289
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using an explicit build target of 'all'.
"""
import TestGyp

# Generate, relocate, build the 'all' target, then verify each copy rule
# placed its file with the expected contents.
test = TestGyp.TestGyp()

test.run_gyp('copies.gyp', chdir='src')

# Build from a relocated copy of the sources so absolute-path assumptions
# in the generated build files are caught.
test.relocate('src', 'relocate/src')

test.build('copies.gyp', test.ALL, chdir='relocate/src')

# file1 is copied relative to the source tree ...
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')

# ... the rest land in the generator's build output directory.
test.built_file_must_match('copies-out/file2',
                           'file2 contents\n',
                           chdir='relocate/src')

test.built_file_must_match('copies-out/directory/file3',
                           'file3 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
                           'file4 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
                           'file5 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
                           'file6 contents\n',
                           chdir='relocate/src')

test.pass_test()
|
bsd-3-clause
|
lukas-ke/faint-graphics-editor
|
build-sys/build_sys/help_util/markup_regex.py
|
1
|
2539
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import re
# FIX: all patterns now use raw strings.  The originals relied on Python
# passing unrecognised escape sequences (e.g. "\*", "\{", "\|") through
# unchanged, which emits DeprecationWarning (SyntaxWarning on newer
# interpreters).  The compiled patterns are identical.

# Section-title regexes
title_content = r" (.*?)(?:\{(.*?)\})? "
title1 = re.compile(r"^=%s=$" % title_content)
title2 = re.compile(r"^==%s==$" % title_content)
title3 = re.compile(r"^===%s===$" % title_content)

# Comment to end of line
re_comment = re.compile(r'#.*')

# Summary of a page
re_summary = re.compile(r'^summary\:"(.+)"$')

# Instruction to summarize child pages
re_summarize_children = re.compile(r'^\\child_summary$')

# Label for cross-referencing
label = re.compile(r"^\\label\((.*?)\)$")

# Bullet points for bullet lists
bullet = re.compile(r"^\* (.*)$")

# Image filename from images/
re_image = re.compile(r"\\image\((.*?)\)")

# Image map description file
re_image_map = re.compile(r"\\imagemap\((.*?)\)")

# Image filename from graphics/
re_graphic = re.compile(r"\\graphic\((.*?)\)")

# Include raw content
re_include = re.compile(r"\\include\((.*?)\)")

# Include raw content
re_verbatim_include = re.compile(r"\\verbatim_include\((.*?)\)")

# Include other page
re_insert = re.compile(r"\\insert\((.*?)\)")

# Font styles
bold = re.compile(r"\*(.*?)\*")
italic = re.compile(r"'(.*?)'")

# Centered text
center = re.compile(r"/(.*?)/")

# Horizontal line
hr = re.compile(r"^---$")

# Reference to a label or page
re_ref = re.compile(r"\\ref\((.*?)\)")

# External reference
re_extref = re.compile(r"\\extref\((.*?)\)")

# Include Python code file
re_py_sample = re.compile(r"\\py_example\((.*)\)")

# A table row (with one or more cells)
table_row = re.compile(r"^\|\|.*\|\|$")

# Table style definition
table_style = re.compile(r"^tablestyle:(.*)$")

# Table widths definition
table_widths = re.compile(r"^tablewidths:(.*)$")

re_withbind = re.compile(r"withbind\:(.*?);")
re_bind = re.compile(r"bind\:(.*?);")
re_bindlist = re.compile(r"^\\bindlist\(\)$")
re_configpath = re.compile(r"\\pyconfigpath")
|
apache-2.0
|
pchauncey/ansible
|
lib/ansible/modules/windows/win_regmerge.py
|
33
|
3903
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_regmerge
version_added: "2.1"
short_description: Merges the contents of a registry file into the windows registry
description:
- Wraps the reg.exe command to import the contents of a registry file.
- Suitable for use with registry files created using M(win_template).
- Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not
be merged.
- Exported registry files often start with a Byte Order Mark which must be removed if the file is to templated using M(win_template).
- Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
- See also M(win_template), M(win_regedit)
options:
path:
description:
- The full path including file name to the registry file on the remote machine to be merged
required: true
default: no default
compare_key:
description:
- The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in HKLM or HKCU part of registry.
Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
required: false
default: no default
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- Organise your registry files so that they contain a single root registry
key if you want to use the compare_to functionality.
This module does not force registry settings to be in the state
described in the file. If registry settings have been modified externally
the module will merge the contents of the file but continue to report
differences on subsequent runs.
To force registry change, use M(win_regedit) with state=absent before
using M(win_regmerge).
'''
EXAMPLES = r'''
# Merge in a registry file without comparing to current registry
# Note that paths using / to separate are preferred as they require less special handling than \
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
# Compare and merge registry file
win_regmerge:
path: C:/autodeploy/myCompany-settings.reg
compare_to: HKLM:\SOFTWARE\myCompany
'''
RETURN = r'''
compare_to_key_found:
description: whether the parent registry key has been found for comparison
returned: when comparison key not found in registry
type: boolean
sample: false
difference_count:
description: number of differences between the registry and the file
returned: changed
type: int
sample: 1
compared:
description: whether a comparison has taken place between the registry and the file
returned: when a comparison key has been supplied and comparison has been attempted
type: boolean
sample: true
'''
|
gpl-3.0
|
apostle/apostle.py
|
apostle/mail.py
|
1
|
2161
|
import json
import base64
class Mail(object):
    """
    An object that represents a single email to be sent via Apostle.io.

    Known attributes (declared on the class below) are stored directly on
    the instance; any *other* attribute assigned at runtime is collected
    into ``data`` and sent as the 'data' key to Apostle.io -- see
    ``__setattr__``.
    """
    # NOTE: these class-level defaults document the known fields and make
    # the ``hasattr`` check in __setattr__ succeed.  The mutable ones
    # ({} / []) are re-bound per instance in __init__ so instances never
    # share state.
    # The template slug to be sent
    template_id = None
    # The email address to be sent to
    email = None
    # Any extra data that the template requires
    data = {}
    # Overrides the template's from address
    from_address = None
    # Additional headers to be sent to Apostle.io
    headers = {}
    # Override the default template layout
    layout_id = None
    # The name of the recipient
    name = None
    # Reply To address
    reply_to = None
    # Attachments
    attachments = []
    # In the event of an error, the message will be stored here
    _error_message = None

    def __init__(self, template_id, options):
        """Initializes a new Mail object.

        :param template_id: The Apostle.io template slug
        :param options: A dict of template data; keys that match known
            attributes set those attributes, anything else goes to ``data``
        """
        super(Mail, self).__init__()
        # Reset instance attributes: re-bind every known attribute on the
        # instance so the mutable class-level defaults are never shared
        # between Mail objects.
        self.template_id = None
        self.email = None
        self.data = {}
        self.from_address = None
        self.headers = {}
        self.layout_id = None
        self.name = None
        self.reply_to = None
        self._error_message = None
        self.attachments = []
        self.template_id = template_id
        if options:
            for name, value in options.items():
                self.__setattr__(name, value)

    def __setattr__(self, name, value):
        # Known attributes are set normally; unknown names become template
        # data.  (Caveat: ``hasattr`` also matches methods, so an option
        # named e.g. 'add_attachment' would shadow that method.)
        if hasattr(self, name):
            self.__dict__[name] = value
        else:
            self.data[name] = value

    def set_error(self, message):
        """Record an error message reported for this mail."""
        self._error_message = message

    def to_recipient_dict(self):
        """Return ``{email: payload}`` for the Apostle.io delivery API.

        Keys whose value is None, an empty dict, or an empty list are
        omitted from the payload.
        """
        recipient_dict = {
            'data': self.data,
            'from': self.from_address,
            'headers': self.headers,
            'layout_id': self.layout_id,
            'name': self.name,
            'reply_to': self.reply_to,
            'template_id': self.template_id,
            'attachments': self.attachments
        }
        for key in list(recipient_dict.keys()):
            val = recipient_dict[key]
            # 'is None' (not '== None') avoids surprises with values that
            # define a custom __eq__ (fixes the E711 comparison).
            if val is None or val == {} or val == []:
                del recipient_dict[key]
        return {self.email: recipient_dict}

    def add_attachment(self, name, content):
        """Attach ``content`` (bytes) under ``name``, base64-encoded."""
        self.attachments.append({
            "name": name,
            "data": base64.b64encode(content)
        })
|
mit
|
petrjasek/superdesk-core
|
superdesk/data_updates/00007_20180321-092824_archive.py
|
2
|
1613
|
# -*- coding: utf-8; -*-
# This file is part of Superdesk.
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
#
# Author : tomas
# Creation: 2018-03-21 09:28
import superdesk
import subprocess
import json
from superdesk.commands.data_updates import BaseDataUpdate
from os.path import realpath, join, dirname
node_script_path = join(dirname(realpath(superdesk.__file__)), "data_updates", "00007_20180321-092824_archive.dist.js")
def get_updated_editor_state(editor_state):
    """Transform a draftjs editor state via the bundled Node.js script.

    The state is serialised to JSON, piped into the script at
    ``node_script_path`` and the script's JSON output is returned.  Any
    failure (node missing, script error, unparseable output, ...) is a
    deliberate best-effort fallback: the input state is returned unchanged.
    """
    payload = bytes(json.dumps(editor_state), "utf-8")
    try:
        with subprocess.Popen(
            ["node", node_script_path],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        ) as proc:
            raw_output, _ = proc.communicate(payload)
            return json.loads(raw_output.decode("utf-8"))
    except Exception:
        # Best effort: keep the original state when conversion fails.
        return editor_state
class DataUpdate(BaseDataUpdate):
    """Rewrite stored draftjs ``editor_state`` in archive-related collections."""

    # Validation requires a single resource name even though forwards()
    # touches several collections.
    resource = "archive"  # will use multiple resources, keeping this here so validation passes

    def forwards(self, mongodb_collection, mongodb_database):
        """Upgrade ``editor_state`` on every item that carries one."""
        for collection_name in ("archive", "archive_autosave", "published"):
            collection = mongodb_database[collection_name]
            query = {"editor_state": {"$exists": True}}
            for item in collection.find(query):
                new_state = get_updated_editor_state(item["editor_state"])
                result = collection.update(
                    {"_id": item["_id"]},
                    {"$set": {"editor_state": new_state}},
                )
                # Echo each update result for migration-log visibility.
                print(result)

    def backwards(self, mongodb_collection, mongodb_database):
        """Irreversible migration: nothing to undo."""
        pass
|
agpl-3.0
|
yangkf1985/tornado
|
tornado/httpclient.py
|
104
|
26658
|
"""Blocking and non-blocking HTTP client interfaces.
This module defines a common interface shared by two implementations,
``simple_httpclient`` and ``curl_httpclient``. Applications may either
instantiate their chosen implementation class directly or use the
`AsyncHTTPClient` class from this module, which selects an implementation
that can be overridden with the `AsyncHTTPClient.configure` method.
The default implementation is ``simple_httpclient``, and this is expected
to be suitable for most users' needs. However, some applications may wish
to switch to ``curl_httpclient`` for reasons such as the following:
* ``curl_httpclient`` has some features not found in ``simple_httpclient``,
including support for HTTP proxies and the ability to use a specified
network interface.
* ``curl_httpclient`` is more likely to be compatible with sites that are
not-quite-compliant with the HTTP spec, or sites that use little-exercised
features of HTTP.
* ``curl_httpclient`` is faster.
* ``curl_httpclient`` was the default prior to Tornado 2.0.
Note that if you are using ``curl_httpclient``, it is highly
recommended that you use a recent version of ``libcurl`` and
``pycurl``. Currently the minimum supported version of libcurl is
7.21.1, and the minimum version of pycurl is 7.18.2. It is highly
recommended that your ``libcurl`` installation is built with
asynchronous DNS resolver (threaded or c-ares), otherwise you may
encounter various problems with request timeouts (for more
information, see
http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS
and comments in curl_httpclient.py).
To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup::
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import time
import weakref
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str
from tornado import httputil, stack_context
from tornado.ioloop import IOLoop
from tornado.util import Configurable
class HTTPClient(object):
    """A blocking HTTP client.

    This interface is provided for convenience and testing; most applications
    that are running an IOLoop will want to use `AsyncHTTPClient` instead.
    Typical usage looks like this::

        http_client = httpclient.HTTPClient()
        try:
            response = http_client.fetch("http://www.google.com/")
            print response.body
        except httpclient.HTTPError as e:
            # HTTPError is raised for non-200 responses; the response
            # can be found in e.response.
            print("Error: " + str(e))
        except Exception as e:
            # Other errors are possible, such as IOError.
            print("Error: " + str(e))
        http_client.close()
    """
    def __init__(self, async_client_class=None, **kwargs):
        # _closed starts True so that __del__ -> close() is safe even when
        # construction fails partway through (e.g. the async client's
        # constructor raises).  Previously a half-constructed instance
        # raised AttributeError from __del__ during garbage collection.
        self._closed = True
        self._io_loop = IOLoop(make_current=False)
        if async_client_class is None:
            async_client_class = AsyncHTTPClient
        self._async_client = async_client_class(self._io_loop, **kwargs)
        self._closed = False

    def __del__(self):
        self.close()

    def close(self):
        """Closes the HTTPClient, freeing any resources used."""
        if not self._closed:
            self._async_client.close()
            self._io_loop.close()
            self._closed = True

    def fetch(self, request, **kwargs):
        """Executes a request, returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        If an error occurs during the fetch, we raise an `HTTPError` unless
        the ``raise_error`` keyword argument is set to False.
        """
        # Run the async fetch to completion on our private IOLoop.
        response = self._io_loop.run_sync(functools.partial(
            self._async_client.fetch, request, **kwargs))
        return response
class AsyncHTTPClient(Configurable):
    """An non-blocking HTTP client.

    Example usage::

        def handle_request(response):
            if response.error:
                print "Error:", response.error
            else:
                print response.body

        http_client = AsyncHTTPClient()
        http_client.fetch("http://www.google.com/", handle_request)

    The constructor for this class is magic in several respects: It
    actually creates an instance of an implementation-specific
    subclass, and instances are reused as a kind of pseudo-singleton
    (one per `.IOLoop`).  The keyword argument ``force_instance=True``
    can be used to suppress this singleton behavior.  Unless
    ``force_instance=True`` is used, no arguments other than
    ``io_loop`` should be passed to the `AsyncHTTPClient` constructor.
    The implementation subclass as well as arguments to its
    constructor can be set with the static method `configure()`

    All `AsyncHTTPClient` implementations support a ``defaults``
    keyword argument, which can be used to set default values for
    `HTTPRequest` attributes.  For example::

        AsyncHTTPClient.configure(
            None, defaults=dict(user_agent="MyUserAgent"))
        # or with force_instance:
        client = AsyncHTTPClient(force_instance=True,
                                 defaults=dict(user_agent="MyUserAgent"))

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    @classmethod
    def configurable_base(cls):
        # Configurable hook: all subclasses configure/instantiate through
        # AsyncHTTPClient itself.
        return AsyncHTTPClient

    @classmethod
    def configurable_default(cls):
        # Imported lazily to avoid a circular import at module load time.
        from tornado.simple_httpclient import SimpleAsyncHTTPClient
        return SimpleAsyncHTTPClient

    @classmethod
    def _async_clients(cls):
        # Per-(sub)class cache mapping IOLoop -> client instance.  The
        # WeakKeyDictionary lets a dead IOLoop (and its client) be collected.
        attr_name = '_async_client_dict_' + cls.__name__
        if not hasattr(cls, attr_name):
            setattr(cls, attr_name, weakref.WeakKeyDictionary())
        return getattr(cls, attr_name)

    def __new__(cls, io_loop=None, force_instance=False, **kwargs):
        # Pseudo-singleton: reuse one instance per IOLoop unless
        # force_instance=True was requested.
        io_loop = io_loop or IOLoop.current()
        if force_instance:
            instance_cache = None
        else:
            instance_cache = cls._async_clients()
        if instance_cache is not None and io_loop in instance_cache:
            return instance_cache[io_loop]
        instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop,
                                                       **kwargs)
        # Make sure the instance knows which cache to remove itself from.
        # It can't simply call _async_clients() because we may be in
        # __new__(AsyncHTTPClient) but instance.__class__ may be
        # SimpleAsyncHTTPClient.
        instance._instance_cache = instance_cache
        if instance_cache is not None:
            instance_cache[instance.io_loop] = instance
        return instance

    def initialize(self, io_loop, defaults=None):
        # Configurable subclasses initialize here instead of __init__.
        self.io_loop = io_loop
        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)
        self._closed = False

    def close(self):
        """Destroys this HTTP client, freeing any file descriptors used.

        This method is **not needed in normal use** due to the way
        that `AsyncHTTPClient` objects are transparently reused.
        ``close()`` is generally only necessary when either the
        `.IOLoop` is also being closed, or the ``force_instance=True``
        argument was used when creating the `AsyncHTTPClient`.

        No other methods may be called on the `AsyncHTTPClient` after
        ``close()``.
        """
        if self._closed:
            return
        self._closed = True
        if self._instance_cache is not None:
            # Only this instance may be cached for its IOLoop; anything else
            # means the singleton cache was corrupted.
            if self._instance_cache.get(self.io_loop) is not self:
                raise RuntimeError("inconsistent AsyncHTTPClient cache")
            del self._instance_cache[self.io_loop]

    def fetch(self, request, callback=None, raise_error=True, **kwargs):
        """Executes a request, asynchronously returning an `HTTPResponse`.

        The request may be either a string URL or an `HTTPRequest` object.
        If it is a string, we construct an `HTTPRequest` using any additional
        kwargs: ``HTTPRequest(request, **kwargs)``

        This method returns a `.Future` whose result is an
        `HTTPResponse`.  By default, the ``Future`` will raise an `HTTPError`
        if the request returned a non-200 response code.  Instead, if
        ``raise_error`` is set to False, the response will always be
        returned regardless of the response code.

        If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
        In the callback interface, `HTTPError` is not automatically raised.
        Instead, you must check the response's ``error`` attribute or
        call its `~HTTPResponse.rethrow` method.
        """
        if self._closed:
            raise RuntimeError("fetch() called on closed AsyncHTTPClient")
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)
        # We may modify this (to add Host, Accept-Encoding, etc),
        # so make sure we don't modify the caller's object.  This is also
        # where normal dicts get converted to HTTPHeaders objects.
        request.headers = httputil.HTTPHeaders(request.headers)
        request = _RequestProxy(request, self.defaults)
        future = TracebackFuture()
        if callback is not None:
            callback = stack_context.wrap(callback)

            def handle_future(future):
                # Legacy-callback bridge: deliver the HTTPResponse to the
                # callback whether the future resolved normally, with an
                # HTTPError carrying a response, or with some other error
                # (synthesized as a 599 response).
                exc = future.exception()
                if isinstance(exc, HTTPError) and exc.response is not None:
                    response = exc.response
                elif exc is not None:
                    response = HTTPResponse(
                        request, 599, error=exc,
                        request_time=time.time() - request.start_time)
                else:
                    response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)

        def handle_response(response):
            # raise_error controls whether non-2xx results resolve the
            # future with an exception or a plain response.
            if raise_error and response.error:
                future.set_exception(response.error)
            else:
                future.set_result(response)
        self.fetch_impl(request, handle_response)
        return future

    def fetch_impl(self, request, callback):
        # Implementation subclasses perform the actual request here.
        raise NotImplementedError()

    @classmethod
    def configure(cls, impl, **kwargs):
        """Configures the `AsyncHTTPClient` subclass to use.

        ``AsyncHTTPClient()`` actually creates an instance of a subclass.
        This method may be called with either a class object or the
        fully-qualified name of such a class (or ``None`` to use the default,
        ``SimpleAsyncHTTPClient``)

        If additional keyword arguments are given, they will be passed
        to the constructor of each subclass instance created.  The
        keyword argument ``max_clients`` determines the maximum number
        of simultaneous `~AsyncHTTPClient.fetch()` operations that can
        execute in parallel on each `.IOLoop`.  Additional arguments
        may be supported depending on the implementation class in use.

        Example::

           AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
        """
        super(AsyncHTTPClient, cls).configure(impl, **kwargs)
class HTTPRequest(object):
    """HTTP client request object."""

    # Default values for HTTPRequest parameters.
    # Merged with the values on the request object by AsyncHTTPClient
    # implementations.
    _DEFAULTS = dict(
        connect_timeout=20.0,
        request_timeout=20.0,
        follow_redirects=True,
        max_redirects=5,
        decompress_response=True,
        proxy_password='',
        allow_nonstandard_methods=False,
        validate_cert=True)

    def __init__(self, url, method="GET", headers=None, body=None,
                 auth_username=None, auth_password=None, auth_mode=None,
                 connect_timeout=None, request_timeout=None,
                 if_modified_since=None, follow_redirects=None,
                 max_redirects=None, user_agent=None, use_gzip=None,
                 network_interface=None, streaming_callback=None,
                 header_callback=None, prepare_curl_callback=None,
                 proxy_host=None, proxy_port=None, proxy_username=None,
                 proxy_password=None, allow_nonstandard_methods=None,
                 validate_cert=None, ca_certs=None,
                 allow_ipv6=None,
                 client_key=None, client_cert=None, body_producer=None,
                 expect_100_continue=False, decompress_response=None,
                 ssl_options=None):
        r"""All parameters except ``url`` are optional.

        :arg string url: URL to fetch
        :arg string method: HTTP method, e.g. "GET" or "POST"
        :arg headers: Additional HTTP headers to pass on the request
        :type headers: `~tornado.httputil.HTTPHeaders` or `dict`
        :arg body: HTTP request body as a string (byte or unicode; if unicode
           the utf-8 encoding will be used)
        :arg body_producer: Callable used for lazy/asynchronous request bodies.
           It is called with one argument, a ``write`` function, and should
           return a `.Future`.  It should call the write function with new
           data as it becomes available.  The write function returns a
           `.Future` which can be used for flow control.
           Only one of ``body`` and ``body_producer`` may
           be specified.  ``body_producer`` is not supported on
           ``curl_httpclient``.  When using ``body_producer`` it is recommended
           to pass a ``Content-Length`` in the headers as otherwise chunked
           encoding will be used, and many servers do not support chunked
           encoding on requests.  New in Tornado 4.0
        :arg string auth_username: Username for HTTP authentication
        :arg string auth_password: Password for HTTP authentication
        :arg string auth_mode: Authentication mode; default is "basic".
           Allowed values are implementation-defined; ``curl_httpclient``
           supports "basic" and "digest"; ``simple_httpclient`` only supports
           "basic"
        :arg float connect_timeout: Timeout for initial connection in seconds
        :arg float request_timeout: Timeout for entire request in seconds
        :arg if_modified_since: Timestamp for ``If-Modified-Since`` header
        :type if_modified_since: `datetime` or `float`
        :arg bool follow_redirects: Should redirects be followed automatically
           or return the 3xx response?
        :arg int max_redirects: Limit for ``follow_redirects``
        :arg string user_agent: String to send as ``User-Agent`` header
        :arg bool decompress_response: Request a compressed response from
           the server and decompress it after downloading.  Default is True.
           New in Tornado 4.0.
        :arg bool use_gzip: Deprecated alias for ``decompress_response``
           since Tornado 4.0.
        :arg string network_interface: Network interface to use for request.
           ``curl_httpclient`` only; see note below.
        :arg callable streaming_callback: If set, ``streaming_callback`` will
           be run with each chunk of data as it is received, and
           ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in
           the final response.
        :arg callable header_callback: If set, ``header_callback`` will
           be run with each header line as it is received (including the
           first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line
           containing only ``\r\n``.  All lines include the trailing newline
           characters).  ``HTTPResponse.headers`` will be empty in the final
           response.  This is most useful in conjunction with
           ``streaming_callback``, because it's the only way to get access to
           header data while the request is in progress.
        :arg callable prepare_curl_callback: If set, will be called with
           a ``pycurl.Curl`` object to allow the application to make additional
           ``setopt`` calls.
        :arg string proxy_host: HTTP proxy hostname.  To use proxies,
           ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username`` and
           ``proxy_pass`` are optional.  Proxies are currently only supported
           with ``curl_httpclient``.
        :arg int proxy_port: HTTP proxy port
        :arg string proxy_username: HTTP proxy username
        :arg string proxy_password: HTTP proxy password
        :arg bool allow_nonstandard_methods: Allow unknown values for ``method``
           argument?
        :arg bool validate_cert: For HTTPS requests, validate the server's
           certificate?
        :arg string ca_certs: filename of CA certificates in PEM format,
           or None to use defaults.  See note below when used with
           ``curl_httpclient``.
        :arg string client_key: Filename for client SSL key, if any.  See
           note below when used with ``curl_httpclient``.
        :arg string client_cert: Filename for client SSL certificate, if any.
           See note below when used with ``curl_httpclient``.
        :arg ssl.SSLContext ssl_options: `ssl.SSLContext` object for use in
           ``simple_httpclient`` (unsupported by ``curl_httpclient``).
           Overrides ``validate_cert``, ``ca_certs``, ``client_key``,
           and ``client_cert``.
        :arg bool allow_ipv6: Use IPv6 when available?  Default is true.
        :arg bool expect_100_continue: If true, send the
           ``Expect: 100-continue`` header and wait for a continue response
           before sending the request body.  Only supported with
           simple_httpclient.

        .. note::

            When using ``curl_httpclient`` certain options may be
            inherited by subsequent fetches because ``pycurl`` does
            not allow them to be cleanly reset.  This applies to the
            ``ca_certs``, ``client_key``, ``client_cert``, and
            ``network_interface`` arguments.  If you use these
            options, you should pass them on every request (you don't
            have to always use the same values, but it's not possible
            to mix requests that specify these options with ones that
            use the defaults).

        .. versionadded:: 3.1
           The ``auth_mode`` argument.

        .. versionadded:: 4.0
           The ``body_producer`` and ``expect_100_continue`` arguments.

        .. versionadded:: 4.2
           The ``ssl_options`` argument.
        """
        # Note that some of these attributes go through property setters
        # defined below.
        # The headers setter must run first: it turns None into a fresh
        # HTTPHeaders so the If-Modified-Since assignment below is safe.
        self.headers = headers
        if if_modified_since:
            # NOTE(review): when a caller supplies a headers object, this
            # mutates the caller's object rather than a copy.
            self.headers["If-Modified-Since"] = httputil.format_timestamp(
                if_modified_since)
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_username = proxy_username
        self.proxy_password = proxy_password
        self.url = url
        self.method = method
        self.body = body
        self.body_producer = body_producer
        self.auth_username = auth_username
        self.auth_password = auth_password
        self.auth_mode = auth_mode
        self.connect_timeout = connect_timeout
        self.request_timeout = request_timeout
        self.follow_redirects = follow_redirects
        self.max_redirects = max_redirects
        self.user_agent = user_agent
        # decompress_response wins over the deprecated use_gzip alias.
        if decompress_response is not None:
            self.decompress_response = decompress_response
        else:
            self.decompress_response = use_gzip
        self.network_interface = network_interface
        self.streaming_callback = streaming_callback
        self.header_callback = header_callback
        self.prepare_curl_callback = prepare_curl_callback
        self.allow_nonstandard_methods = allow_nonstandard_methods
        self.validate_cert = validate_cert
        self.ca_certs = ca_certs
        self.allow_ipv6 = allow_ipv6
        self.client_key = client_key
        self.client_cert = client_cert
        self.ssl_options = ssl_options
        self.expect_100_continue = expect_100_continue
        # Recorded so implementations can compute request_time.
        self.start_time = time.time()

    @property
    def headers(self):
        return self._headers

    @headers.setter
    def headers(self, value):
        # Normalize None to an empty HTTPHeaders object.
        if value is None:
            self._headers = httputil.HTTPHeaders()
        else:
            self._headers = value

    @property
    def body(self):
        return self._body

    @body.setter
    def body(self, value):
        # Bodies are always stored as utf-8-encoded bytes.
        self._body = utf8(value)

    @property
    def body_producer(self):
        return self._body_producer

    @body_producer.setter
    def body_producer(self, value):
        # Callbacks are wrapped to preserve the caller's stack context.
        self._body_producer = stack_context.wrap(value)

    @property
    def streaming_callback(self):
        return self._streaming_callback

    @streaming_callback.setter
    def streaming_callback(self, value):
        self._streaming_callback = stack_context.wrap(value)

    @property
    def header_callback(self):
        return self._header_callback

    @header_callback.setter
    def header_callback(self, value):
        self._header_callback = stack_context.wrap(value)

    @property
    def prepare_curl_callback(self):
        return self._prepare_curl_callback

    @prepare_curl_callback.setter
    def prepare_curl_callback(self, value):
        self._prepare_curl_callback = stack_context.wrap(value)
class HTTPResponse(object):
    """HTTP Response object.

    Attributes:

    * request: HTTPRequest object
    * code: numeric HTTP status code, e.g. 200 or 404
    * reason: human-readable reason phrase describing the status code
    * headers: `tornado.httputil.HTTPHeaders` object
    * effective_url: final location of the resource after following any
      redirects
    * buffer: ``cStringIO`` object for response body
    * body: response body as string (created on demand from ``self.buffer``)
    * error: Exception object, if any
    * request_time: seconds from request start to finish
    * time_info: dictionary of diagnostic timing information from the request.
      Available data are subject to change, but currently uses timings
      available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html,
      plus ``queue``, which is the delay (if any) introduced by waiting for
      a slot under `AsyncHTTPClient`'s ``max_clients`` setting.
    """
    def __init__(self, request, code, headers=None, buffer=None,
                 effective_url=None, error=None, request_time=None,
                 time_info=None, reason=None):
        # Unwrap the defaults proxy so .request is always the caller's
        # original HTTPRequest.
        if isinstance(request, _RequestProxy):
            self.request = request.request
        else:
            self.request = request
        self.code = code
        self.reason = reason or httputil.responses.get(code, "Unknown")
        self.headers = headers if headers is not None else httputil.HTTPHeaders()
        self.buffer = buffer
        self._body = None  # lazily filled from buffer by _get_body
        self.effective_url = request.url if effective_url is None else effective_url
        if error is not None:
            self.error = error
        elif not 200 <= self.code < 300:
            # Non-2xx responses carry an HTTPError unless one was supplied.
            self.error = HTTPError(self.code, message=self.reason,
                                   response=self)
        else:
            self.error = None
        self.request_time = request_time
        self.time_info = time_info or {}

    def _get_body(self):
        # Materialise the buffer at most once and cache the result.
        if self.buffer is None:
            return None
        if self._body is None:
            self._body = self.buffer.getvalue()
        return self._body

    body = property(_get_body)

    def rethrow(self):
        """If there was an error on the request, raise an `HTTPError`."""
        if self.error:
            raise self.error

    def __repr__(self):
        pairs = ("%s=%r" % item for item in sorted(self.__dict__.items()))
        return "%s(%s)" % (self.__class__.__name__, ",".join(pairs))
class HTTPError(Exception):
    """Exception thrown for an unsuccessful HTTP request.

    Attributes:

    * ``code`` - HTTP error integer error code, e.g. 404.  Error code 599 is
      used when no HTTP response was received, e.g. for a timeout.
    * ``message`` - the reason phrase used in the formatted exception text
      (falls back to the standard reason phrase for ``code``).
    * ``response`` - `HTTPResponse` object, if any.

    Note that if ``follow_redirects`` is False, redirects become HTTPErrors,
    and you can look at ``error.response.headers['Location']`` to see the
    destination of the redirect.
    """
    def __init__(self, code, message=None, response=None):
        self.code = code
        # Store the message on the instance (previously it was only
        # embedded in str(e)), so callers can inspect it directly.
        self.message = message or httputil.responses.get(code, "Unknown")
        self.response = response
        Exception.__init__(self, "HTTP %d: %s" % (self.code, self.message))
class _RequestProxy(object):
"""Combines an object with a dictionary of defaults.
Used internally by AsyncHTTPClient implementations.
"""
def __init__(self, request, defaults):
self.request = request
self.defaults = defaults
def __getattr__(self, name):
request_attr = getattr(self.request, name)
if request_attr is not None:
return request_attr
elif self.defaults is not None:
return self.defaults.get(name, None)
else:
return None
def main():
    """Command-line HTTP client: fetch each URL argument and print it."""
    from tornado.options import define, options, parse_command_line
    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    args = parse_command_line()
    client = HTTPClient()
    for arg in args:
        try:
            response = client.fetch(arg,
                                    follow_redirects=options.follow_redirects,
                                    validate_cert=options.validate_cert,
                                    )
        except HTTPError as e:
            # A non-2xx status still carries a usable HTTPResponse; show it
            # rather than aborting.  Re-raise only when there is no response
            # at all (e.g. connection failure).
            if e.response is not None:
                response = e.response
            else:
                raise
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(native_str(response.body))
    client.close()


if __name__ == "__main__":
    main()
|
apache-2.0
|
joeyb/joeyb-blog
|
externals/pygments/styles/autumn.py
|
24
|
2123
|
# -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: 2006-2007 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
    """
    A colorful style, inspired by the terminal highlighting style.
    """

    # No extra CSS applied to every token by default.
    default_style = ""

    # Token -> style-string mapping (pygments format: optional 'bold',
    # 'italic', 'underline' modifiers plus a hex color).
    styles = {
        # Whitespace and comments
        Whitespace:                 '#bbbbbb',
        Comment:                    'italic #aaaaaa',
        Comment.Preproc:            'noitalic #4c8317',
        Comment.Special:            'italic #0000aa',

        # Keywords and operators
        Keyword:                    '#0000aa',
        Keyword.Type:               '#00aaaa',
        Operator.Word:              '#0000aa',

        # Names (identifiers)
        Name.Builtin:               '#00aaaa',
        Name.Function:              '#00aa00',
        Name.Class:                 'underline #00aa00',
        Name.Namespace:             'underline #00aaaa',
        Name.Variable:              '#aa0000',
        Name.Constant:              '#aa0000',
        Name.Entity:                'bold #800',
        Name.Attribute:             '#1e90ff',
        Name.Tag:                   'bold #1e90ff',
        Name.Decorator:             '#888888',

        # Literals
        String:                     '#aa5500',
        String.Symbol:              '#0000aa',
        String.Regex:               '#009999',
        Number:                     '#009999',

        # Generic (diff/doc-style) tokens
        Generic.Heading:            'bold #000080',
        Generic.Subheading:         'bold #800080',
        Generic.Deleted:            '#aa0000',
        Generic.Inserted:           '#00aa00',
        Generic.Error:              '#aa0000',
        Generic.Emph:               'italic',
        Generic.Strong:             'bold',
        Generic.Prompt:             '#555555',
        Generic.Output:             '#888888',
        Generic.Traceback:          '#aa0000',

        # Lexer errors: red on pale-red background
        Error:                      '#F00 bg:#FAA'
    }
|
mit
|
kblin/supybot-gsoc
|
src/log.py
|
5
|
15607
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import types
import atexit
import logging
import operator
import textwrap
import traceback
import supybot.ansi as ansi
import supybot.conf as conf
import supybot.utils as utils
import supybot.registry as registry
import supybot.ircutils as ircutils
# Exceptions that must never be swallowed by the logging "firewall";
# they are re-raised so the process can actually shut down.
deadlyExceptions = [KeyboardInterrupt, SystemExit]

###
# This is for testing, of course.  Mostly is just disables the firewall code
# so exceptions can propagate.
###
testing = False
class Formatter(logging.Formatter):
    # Re-read the configured format string lazily so registry changes take
    # effect without restarting.  staticmethod keeps the lambda from being
    # bound as an instance method.
    _fmtConf = staticmethod(lambda : conf.supybot.log.format())

    def formatTime(self, record, datefmt=None):
        # Delegate to the timestamp() helper (presumably defined elsewhere
        # in this module) for a consistent time format across handlers.
        return timestamp(record.created)

    def formatException(self, (E, e, tb)):
        # Re-raise "deadly" exceptions (KeyboardInterrupt/SystemExit)
        # instead of formatting them, so they propagate and stop the bot.
        for exn in deadlyExceptions:
            if issubclass(e.__class__, exn):
                raise
        return logging.Formatter.formatException(self, (E, e, tb))

    def format(self, record):
        # Refresh the format string on every record before delegating.
        self._fmt = self._fmtConf()
        return logging.Formatter.format(self, record)
class PluginFormatter(Formatter):
    # Identical to Formatter but uses the plugin-specific log format.
    _fmtConf = staticmethod(lambda : conf.supybot.log.plugins.format())
class Logger(logging.Logger):
    def exception(self, *args):
        # Log the traceback plus a short, quasi-stable id derived from the
        # exception type and the call path, so repeated occurrences of the
        # same failure can be correlated in the logs.
        (E, e, tb) = sys.exc_info()
        tbinfo = traceback.extract_tb(tb)
        path = '[%s]' % '|'.join(map(operator.itemgetter(2), tbinfo))
        eStrId = '%s:%s' % (E, path)
        eId = hex(hash(eStrId) & 0xFFFFF)
        logging.Logger.exception(self, *args)
        self.error('Exception id: %s', eId)
        # The traceback should be sufficient if we want it.
        # self.error('Exception string: %s', eStrId)

    def _log(self, level, msg, args, exc_info=None):
        # Pre-format the message eagerly and pass empty args downstream.
        # NOTE(review): `format` here appears to be supybot's string
        # formatter (not the builtin) -- confirm which name is in scope.
        msg = format(msg, *args)
        logging.Logger._log(self, level, msg, (), exc_info=exc_info)
class StdoutStreamHandler(logging.StreamHandler):
    """StreamHandler that wraps output to the terminal width and can
    permanently remove itself from logging if stdout goes away."""
    def format(self, record):
        s = logging.StreamHandler.format(self, record)
        if record.levelname != 'ERROR' and conf.supybot.log.stdout.wrap():
            # We check for ERROR there because otherwise, tracebacks (which are
            # already wrapped by Python itself) wrap oddly.
            if not isinstance(record.levelname, basestring):
                # Debugging aid: a non-string levelname means a malformed
                # record; dump it for inspection.
                print record
                print record.levelname
                print utils.stackTrace()
            prefixLen = len(record.levelname) + 1 # ' '
            s = textwrap.fill(s, width=78, subsequent_indent=' '*prefixLen)
            # NOTE(review): str.rstrip returns a new string and the result
            # is discarded here, so this line is a no-op as written.
            s.rstrip('\r\n')
        return s

    def emit(self, record):
        # Only write to stdout when configured to and not daemonized.
        if conf.supybot.log.stdout() and not conf.daemonized:
            try:
                logging.StreamHandler.emit(self, record)
            except ValueError, e: # Raised if sys.stdout is closed.
                # stdout is gone: detach ourselves and report through the
                # remaining handlers.
                self.disable()
                error('Error logging to stdout.  Removing stdout handler.')
                exception('Uncaught exception in StdoutStreamHandler:')

    def disable(self):
        # Prevent any further emission from this handler and detach it
        # from both our logger and logging's internal handler table.
        self.setLevel(sys.maxint) # Just in case.
        _logger.removeHandler(self)
        logging._acquireLock()
        try:
            del logging._handlers[self]
        finally:
            logging._releaseLock()
class BetterFileHandler(logging.FileHandler):
    def emit(self, record):
        # Like FileHandler.emit, but falls back to UTF-8 encoding when the
        # formatted message can't be written as-is.
        msg = self.format(record)
        if not hasattr(types, "UnicodeType"): #if no unicode support...
            self.stream.write(msg)
            self.stream.write(os.linesep)
        else:
            try:
                self.stream.write(msg)
                self.stream.write(os.linesep)
            except UnicodeError:
                self.stream.write(msg.encode("utf8"))
                self.stream.write(os.linesep)
        self.flush()
class ColorizedFormatter(Formatter):
    # This was necessary because these variables aren't defined until later.
    # The staticmethod is necessary because they get treated like methods.
    _fmtConf = staticmethod(lambda : conf.supybot.log.stdout.format())
    def formatException(self, (E, e, tb)):
        # Tracebacks are painted red when stdout colorization is enabled.
        if conf.supybot.log.stdout.colorized():
            return ''.join([ansi.RED,
                            Formatter.formatException(self, (E, e, tb)),
                            ansi.RESET])
        else:
            return Formatter.formatException(self, (E, e, tb))
    def format(self, record, *args, **kwargs):
        # Color by severity: CRITICAL bold white, ERROR red, WARNING yellow;
        # lower levels are left uncolored.
        if conf.supybot.log.stdout.colorized():
            color = ''
            if record.levelno == logging.CRITICAL:
                color = ansi.WHITE + ansi.BOLD
            elif record.levelno == logging.ERROR:
                color = ansi.RED
            elif record.levelno == logging.WARNING:
                color = ansi.YELLOW
            if color:
                return ''.join([color,
                                Formatter.format(self, record, *args, **kwargs),
                                ansi.RESET])
            else:
                return Formatter.format(self, record, *args, **kwargs)
        else:
            return Formatter.format(self, record, *args, **kwargs)
conf.registerGlobalValue(conf.supybot.directories, 'log',
    conf.Directory('logs', """Determines what directory the bot will store its
    logfiles in."""))
# Make sure the log directories exist before any handler opens a file there.
_logDir = conf.supybot.directories.log()
if not os.path.exists(_logDir):
    os.mkdir(_logDir, 0755)
pluginLogDir = os.path.join(_logDir, 'plugins')
if not os.path.exists(pluginLogDir):
    os.mkdir(pluginLogDir, 0755)
# Open the main messages logfile; failing here is unrecoverable, so exit
# with a human-readable explanation.
try:
    messagesLogFilename = os.path.join(_logDir, 'messages.log')
    _handler = BetterFileHandler(messagesLogFilename)
except EnvironmentError, e:
    raise SystemExit, \
          'Error opening messages logfile (%s). ' \
          'Generally, this is because you are running Supybot in a directory ' \
          'you don\'t have permissions to add files in, or you\'re running ' \
          'Supybot as a different user than you normal do. The original ' \
          'error was: %s' % (messagesLogFilename, utils.gen.exnToString(e))
# These are public.
formatter = Formatter('NEVER SEEN; IF YOU SEE THIS, FILE A BUG!')
pluginFormatter = PluginFormatter('NEVER SEEN; IF YOU SEE THIS, FILE A BUG!')
# These are not.
logging.setLoggerClass(Logger)
_logger = logging.getLogger('supybot')
_stdoutHandler = StdoutStreamHandler(sys.stdout)
class ValidLogLevel(registry.String):
    """Invalid log level."""
    # NOTE(review): for registry value classes the docstring appears to be
    # the user-visible error text shown on bad input -- confirm before
    # editing it.
    handler = None
    minimumLevel = -1
    def set(self, s):
        # Accept either a symbolic level name ("INFO") or a numeric level;
        # registry's self.error() raises, rejecting anything else.
        s = s.upper()
        try:
            level = logging._levelNames[s]
        except KeyError:
            try:
                level = int(s)
            except ValueError:
                self.error()
        if level < self.minimumLevel:
            self.error()
        # Keep the attached handler (file or stdout) in sync with the value.
        if self.handler is not None:
            self.handler.setLevel(level)
        self.setValue(level)
    def __str__(self):
        # The str() is necessary here; apparently getLevelName returns an
        # integer on occasion. logging--
        level = str(logging.getLevelName(self.value))
        if level.startswith('Level'):
            level = level.split()[-1]
        return level
class LogLevel(ValidLogLevel):
    """Invalid log level. Value must be either DEBUG, INFO, WARNING,
    ERROR, or CRITICAL."""
    # This level variable controls the messages logfile handler.
    handler = _handler
class StdoutLogLevel(ValidLogLevel):
    """Invalid log level. Value must be either DEBUG, INFO, WARNING,
    ERROR, or CRITICAL."""
    # This level variable controls the stdout handler.
    handler = _stdoutHandler
conf.registerGroup(conf.supybot, 'log')
conf.registerGlobalValue(conf.supybot.log, 'format',
registry.String('%(levelname)s %(asctime)s %(name)s %(message)s',
"""Determines what the bot's logging format will be. The relevant
documentation on the available formattings is Python's documentation on
its logging module."""))
conf.registerGlobalValue(conf.supybot.log, 'level',
LogLevel(logging.INFO, """Determines what the minimum priority level logged
to file will be. Do note that this value does not affect the level logged
to stdout; for that, you should set the value of supybot.log.stdout.level.
Valid values are DEBUG, INFO, WARNING, ERROR, and CRITICAL, in order of
increasing priority."""))
conf.registerGlobalValue(conf.supybot.log, 'timestampFormat',
registry.String('%Y-%m-%dT%H:%M:%S', """Determines the format string for
timestamps in logfiles. Refer to the Python documentation for the time
module to see what formats are accepted. If you set this variable to the
empty string, times will be logged in a simple seconds-since-epoch
format."""))
class BooleanRequiredFalseOnWindows(registry.Boolean):
    # Boolean config value that refuses True on Windows; used below for
    # ANSI colorization, which Windows consoles don't support.
    def set(self, s):
        registry.Boolean.set(self, s)
        if self.value and os.name == 'nt':
            raise registry.InvalidRegistryValue, \
                  'Value cannot be true on Windows.'
conf.registerGlobalValue(conf.supybot.log, 'stdout',
registry.Boolean(True, """Determines whether the bot will log to
stdout."""))
conf.registerGlobalValue(conf.supybot.log.stdout, 'colorized',
BooleanRequiredFalseOnWindows(False, """Determines whether the bot's logs
to stdout (if enabled) will be colorized with ANSI color."""))
conf.registerGlobalValue(conf.supybot.log.stdout, 'wrap',
registry.Boolean(True, """Determines whether the bot will wrap its logs
when they're output to stdout."""))
conf.registerGlobalValue(conf.supybot.log.stdout, 'format',
registry.String('%(levelname)s %(asctime)s %(message)s',
"""Determines what the bot's logging format will be. The relevant
documentation on the available formattings is Python's documentation on
its logging module."""))
conf.registerGlobalValue(conf.supybot.log.stdout, 'level',
StdoutLogLevel(logging.INFO, """Determines what the minimum priority level
logged will be. Valid values are DEBUG, INFO, WARNING, ERROR, and
CRITICAL, in order of increasing priority."""))
conf.registerGroup(conf.supybot.log, 'plugins')
conf.registerGlobalValue(conf.supybot.log.plugins, 'individualLogfiles',
registry.Boolean(False, """Determines whether the bot will separate plugin
logs into their own individual logfiles."""))
conf.registerGlobalValue(conf.supybot.log.plugins, 'format',
registry.String('%(levelname)s %(asctime)s %(message)s',
"""Determines what the bot's logging format will be. The relevant
documentation on the available formattings is Python's documentation on
its logging module."""))
# These just make things easier.
# Module-level shortcuts so callers can write log.info(...) etc. directly.
debug = _logger.debug
info = _logger.info
warning = _logger.warning
error = _logger.error
critical = _logger.critical
exception = _logger.exception
# These were just begging to be replaced.
registry.error = error
registry.exception = exception
setLevel = _logger.setLevel
# Flush/close all handlers cleanly at interpreter exit.
atexit.register(logging.shutdown)
# ircutils will work without this, but it's useful.
ircutils.debug = debug
def getPluginLogger(name):
    """Return the logger a plugin should use.

    When individual plugin logfiles are disabled, every plugin shares the
    main 'supybot' logger; otherwise each plugin gets its own child logger
    backed by a file in the plugin log directory.
    """
    if not conf.supybot.log.plugins.individualLogfiles():
        return _logger
    pluginLog = logging.getLogger('supybot.plugins.%s' % name)
    if not pluginLog.handlers:
        # First request for this plugin: attach its dedicated file handler.
        logfile = os.path.join(pluginLogDir, '%s.log' % name)
        fileHandler = BetterFileHandler(logfile)
        fileHandler.setLevel(-1)
        fileHandler.setFormatter(pluginFormatter)
        pluginLog.addHandler(fileHandler)
    if name in sys.modules:
        pluginLog.info('Starting log for %s.', name)
    return pluginLog
def timestamp(when=None):
    """Return *when* (seconds since the epoch; default: now) formatted per
    supybot.log.timestampFormat, or as integer epoch seconds when that
    config value is the empty string."""
    if when is None:
        when = time.time()
    # Renamed from 'format': the old local shadowed both the builtin and
    # this module's format() helper used by Logger._log.
    fmt = conf.supybot.log.timestampFormat()
    t = time.localtime(when)
    if fmt:
        return time.strftime(fmt, t)
    else:
        return str(int(time.mktime(t)))
def firewall(f, errorHandler=None):
    # Wrap method f so any exception it raises is logged (and optionally
    # handed to errorHandler) instead of propagating -- except while
    # testing, where exceptions re-raise so failures stay visible.
    def logException(self, s=None):
        if s is None:
            s = 'Uncaught exception'
        # Prefer the instance's own logger when it has one.
        if hasattr(self, 'log'):
            self.log.exception('%s:', s)
        else:
            exception('%s in %s.%s:', s, self.__class__.__name__, f.func_name)
    def m(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except Exception, e:
            if testing:
                raise
            logException(self)
            if errorHandler is not None:
                try:
                    return errorHandler(self, *args, **kwargs)
                except Exception, e:
                    logException(self, 'Uncaught exception in errorHandler')
    # Preserve the wrapped function's name and docstring.
    m = utils.python.changeFunctionName(m, f.func_name, f.__doc__)
    return m
class MetaFirewall(type):
    # Metaclass that wraps every method named in a class's __firewalled__
    # attribute (a sequence of names, or a {name: errorHandler} dict) with
    # firewall(), also inheriting firewalled names from base classes.
    def __new__(cls, name, bases, classdict):
        firewalled = {}
        for base in bases:
            if hasattr(base, '__firewalled__'):
                cls.updateFirewalled(firewalled, base.__firewalled__)
        cls.updateFirewalled(firewalled, classdict.get('__firewalled__', []))
        for (attr, errorHandler) in firewalled.iteritems():
            if attr in classdict:
                classdict[attr] = firewall(classdict[attr], errorHandler)
        return super(MetaFirewall, cls).__new__(cls, name, bases, classdict)
    def getErrorHandler(cls, dictOrTuple, name):
        # A dict maps method name -> error handler; any other sequence
        # means "no handler" for every listed name.
        if isinstance(dictOrTuple, dict):
            return dictOrTuple[name]
        else:
            return None
    getErrorHandler = classmethod(getErrorHandler)
    def updateFirewalled(cls, firewalled, __firewalled__):
        # Merge names from one __firewalled__ spec into the accumulator.
        for attr in __firewalled__:
            firewalled[attr] = cls.getErrorHandler(__firewalled__, attr)
    updateFirewalled = classmethod(updateFirewalled)
class PluginLogFilter(logging.Filter):
    """Keeps plugin records out of the main logfile whenever plugins are
    writing to their own individual logfiles."""
    def filter(self, record):
        isPluginRecord = record.name.startswith('supybot.plugins')
        if isPluginRecord and conf.supybot.log.plugins.individualLogfiles():
            return False
        return True
# Wire up the file handler: formatter, plugin filter, configured level.
_handler.setFormatter(formatter)
_handler.addFilter(PluginLogFilter())
_handler.setLevel(conf.supybot.log.level())
_logger.addHandler(_handler)
# The logger itself passes everything; per-handler levels do the filtering.
_logger.setLevel(-1)
_stdoutFormatter = ColorizedFormatter('IF YOU SEE THIS, FILE A BUG!')
_stdoutHandler.setFormatter(_stdoutFormatter)
_stdoutHandler.setLevel(conf.supybot.log.stdout.level())
# Daemonized bots have no terminal, so skip the stdout handler entirely.
if not conf.daemonized:
    _logger.addHandler(_stdoutHandler)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
bsd-3-clause
|
rosmo/ansible
|
lib/ansible/plugins/lookup/varnames.py
|
50
|
2238
|
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: varnames
author: Ansible Core
version_added: "2.8"
short_description: Lookup matching variable names
description:
- Retrieves a list of matching Ansible variable names.
options:
_terms:
description: List of Python regex patterns to search for in variable names.
required: True
"""
EXAMPLES = """
- name: List variables that start with qz_
debug: msg="{{ lookup('varnames', '^qz_.+')}}"
vars:
qz_1: hello
qz_2: world
qa_1: "I won't show"
qz_: "I won't show either"
- name: Show all variables
debug: msg="{{ lookup('varnames', '.+')}}"
- name: Show variables with 'hosts' in their names
debug: msg="{{ lookup('varnames', 'hosts')}}"
- name: Find several related variables that end specific way
debug: msg="{{ lookup('varnames', '.+_zone$', '.+_location$') }}"
"""
RETURN = """
_value:
description:
- List of the variable names requested.
type: list
"""
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
    def run(self, terms, variables=None, **kwargs):
        """Return the names of all variables matching any of the given
        Python regex patterns.

        :param terms: list of regex pattern strings matched (re.search,
            i.e. anywhere in the name -- anchor with ^/$ when needed).
        :param variables: mapping of available variables (required).
        :raises AnsibleError: if no variables are available, a term is not
            a string, or a pattern fails to compile.
        """
        if variables is None:
            raise AnsibleError('No variables available to search')

        # no options, yet
        # self.set_options(direct=kwargs)

        ret = []
        variable_names = list(variables.keys())
        for term in terms:
            if not isinstance(term, string_types):
                # Fixed the grammar of this user-facing message ("its a").
                raise AnsibleError('Invalid setting identifier, "%s" is not a string, it is a %s' % (term, type(term)))

            try:
                name = re.compile(term)
            except Exception as e:
                raise AnsibleError('Unable to use "%s" as a search parameter: %s' % (term, to_native(e)))

            ret.extend(varname for varname in variable_names if name.search(varname))

        return ret
|
gpl-3.0
|
discosultan/quake-console
|
Samples/Sandbox/Lib/encodings/cp874.py
|
593
|
12851
|
""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec mapping cp874 bytes <-> Unicode via the module-level
    # charmap tables defined below.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # charmap encoding is stateless, so each chunk is encoded independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # charmap decoding is stateless, so each chunk is decoded independently.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Combines Codec.encode with the generic StreamWriter machinery.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Combines Codec.decode with the generic StreamReader machinery.
    pass
### encodings module API
def getregentry():
    # Registration hook used by the encodings package to look up this codec.
    return codecs.CodecInfo(
        name='cp874',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\ufffe' # 0x82 -> UNDEFINED
u'\ufffe' # 0x83 -> UNDEFINED
u'\ufffe' # 0x84 -> UNDEFINED
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\ufffe' # 0x86 -> UNDEFINED
u'\ufffe' # 0x87 -> UNDEFINED
u'\ufffe' # 0x88 -> UNDEFINED
u'\ufffe' # 0x89 -> UNDEFINED
u'\ufffe' # 0x8A -> UNDEFINED
u'\ufffe' # 0x8B -> UNDEFINED
u'\ufffe' # 0x8C -> UNDEFINED
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\ufffe' # 0x99 -> UNDEFINED
u'\ufffe' # 0x9A -> UNDEFINED
u'\ufffe' # 0x9B -> UNDEFINED
u'\ufffe' # 0x9C -> UNDEFINED
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\ufffe' # 0x9F -> UNDEFINED
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
u'\u0e24' # 0xC4 -> THAI CHARACTER RU
u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
u'\u0e26' # 0xC6 -> THAI CHARACTER LU
u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
u'\ufffe' # 0xDB -> UNDEFINED
u'\ufffe' # 0xDC -> UNDEFINED
u'\ufffe' # 0xDD -> UNDEFINED
u'\ufffe' # 0xDE -> UNDEFINED
u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
u'\u0e51' # 0xF1 -> THAI DIGIT ONE
u'\u0e52' # 0xF2 -> THAI DIGIT TWO
u'\u0e53' # 0xF3 -> THAI DIGIT THREE
u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
u'\u0e56' # 0xF6 -> THAI DIGIT SIX
u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
u'\u0e59' # 0xF9 -> THAI DIGIT NINE
u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
u'\ufffe' # 0xFC -> UNDEFINED
u'\ufffe' # 0xFD -> UNDEFINED
u'\ufffe' # 0xFE -> UNDEFINED
u'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
mit
|
kyonetca/onionshare
|
onionshare/web.py
|
1
|
7477
|
# -*- coding: utf-8 -*-
"""
OnionShare | https://onionshare.org/
Copyright (C) 2014 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import Queue, mimetypes, platform, os, sys, zipfile, urllib2
from flask import Flask, Response, request, render_template_string, abort
import strings, helpers
app = Flask(__name__)
# information about the file
file_info = []
zip_filename = None
zip_filesize = None
def set_file_info(filenames):
    # Build the module-level file_info structure ({'files': [...],
    # 'dirs': [...]}, each entry carrying filename/basename/size/size_human)
    # and zip everything into a single archive for download.
    global file_info, zip_filename, zip_filesize
    # build file info list
    file_info = {'files': [], 'dirs': []}
    for filename in filenames:
        info = {
            'filename': filename,
            'basename': os.path.basename(filename.rstrip('/'))
        }
        if os.path.isfile(filename):
            info['size'] = os.path.getsize(filename)
            info['size_human'] = helpers.human_readable_filesize(info['size'])
            file_info['files'].append(info)
        if os.path.isdir(filename):
            info['size'] = helpers.dir_size(filename)
            info['size_human'] = helpers.human_readable_filesize(info['size'])
            file_info['dirs'].append(info)
    # Sort by basename for a stable display order on the download page.
    file_info['files'] = sorted(file_info['files'], key=lambda k: k['basename'])
    file_info['dirs'] = sorted(file_info['dirs'], key=lambda k: k['basename'])
    # zip up the files and folders
    z = helpers.ZipWriter()
    for info in file_info['files']:
        z.add_file(info['filename'])
    for info in file_info['dirs']:
        z.add_dir(info['filename'])
    z.close()
    zip_filename = z.zip_filename
    zip_filesize = os.path.getsize(zip_filename)
REQUEST_LOAD = 0
REQUEST_DOWNLOAD = 1
REQUEST_PROGRESS = 2
REQUEST_OTHER = 3
REQUEST_CANCELED = 4
q = Queue.Queue()
def add_request(type, path, data=None):
    """Queue an event for the GUI: a REQUEST_* type constant, the request
    path, and optional extra data."""
    global q
    event = {'type': type, 'path': path, 'data': data}
    q.put(event)
slug = helpers.random_string(16)
download_count = 0
stay_open = False
def set_stay_open(new_stay_open):
    # Toggle whether the server keeps running after the first download.
    global stay_open
    stay_open = new_stay_open
def get_stay_open():
    # Accessor for the module-level stay_open flag.
    return stay_open
def debug_mode():
    # Route flask warnings (and worse) into a logfile in the temp directory
    # so server-side problems can be diagnosed.
    import logging
    if platform.system() == 'Windows':
        temp_dir = os.environ['Temp'].replace('\\', '/')
    else:
        temp_dir = '/tmp/'
    log_handler = logging.FileHandler('{0}/onionshare_server.log'.format(temp_dir))
    log_handler.setLevel(logging.WARNING)
    app.logger.addHandler(log_handler)
@app.route("/<slug_candidate>")
def index(slug_candidate):
    # Serve the download page, but only when the URL slug matches exactly
    # (constant-time compare avoids leaking the slug via timing).
    if not helpers.constant_time_compare(slug.encode('ascii'), slug_candidate.encode('ascii')):
        abort(404)
    add_request(REQUEST_LOAD, request.path)
    return render_template_string(
        open('{0}/index.html'.format(helpers.get_onionshare_dir())).read(),
        slug=slug,
        file_info=file_info,
        filename=os.path.basename(zip_filename).decode("utf-8"),
        filesize=zip_filesize,
        filesize_human=helpers.human_readable_filesize(zip_filesize),
        strings=strings.strings
    )
@app.route("/<slug_candidate>/download")
def download(slug_candidate):
global download_count
if not helpers.constant_time_compare(slug.encode('ascii'), slug_candidate.encode('ascii')):
abort(404)
# each download has a unique id
download_id = download_count
download_count += 1
# prepare some variables to use inside generate() function below
# which is outside of the request context
shutdown_func = request.environ.get('werkzeug.server.shutdown')
path = request.path
# tell GUI the download started
add_request(REQUEST_DOWNLOAD, path, {'id': download_id})
dirname = os.path.dirname(zip_filename)
basename = os.path.basename(zip_filename)
def generate():
chunk_size = 102400 # 100kb
fp = open(zip_filename, 'rb')
done = False
canceled = False
while not done:
chunk = fp.read(102400)
if chunk == '':
done = True
else:
try:
yield chunk
# tell GUI the progress
downloaded_bytes = fp.tell()
percent = round((1.0 * downloaded_bytes / zip_filesize) * 100, 2)
sys.stdout.write(
"\r{0}, {1}% ".format(helpers.human_readable_filesize(downloaded_bytes), percent))
sys.stdout.flush()
add_request(REQUEST_PROGRESS, path, {'id': download_id, 'bytes': downloaded_bytes})
except:
# looks like the download was canceled
done = True
canceled = True
# tell the GUI the download has canceled
add_request(REQUEST_CANCELED, path, {'id': download_id})
fp.close()
sys.stdout.write("\n")
# download is finished, close the server
if not stay_open and not canceled:
print strings._("closing_automatically")
if shutdown_func is None:
raise RuntimeError('Not running with the Werkzeug Server')
shutdown_func()
r = Response(generate())
r.headers.add('Content-Length', zip_filesize)
r.headers.add('Content-Disposition', 'attachment', filename=basename)
# guess content type
(content_type, _) = mimetypes.guess_type(basename, strict=False)
if content_type is not None:
r.headers.add('Content-Type', content_type)
return r
@app.errorhandler(404)
def page_not_found(e):
    # Any 404 (bad slug, probing) is surfaced to the GUI as an OTHER event.
    add_request(REQUEST_OTHER, request.path)
    return render_template_string(open('{0}/404.html'.format(helpers.get_onionshare_dir())).read())
# shutting down the server only works within the context of flask, so the easiest way to do it is over http
shutdown_slug = helpers.random_string(16)
@app.route("/<shutdown_slug_candidate>/shutdown")
def shutdown(shutdown_slug_candidate):
    # Stop the flask server; guarded by its own secret slug so only the
    # local onionshare process can trigger it.
    if not helpers.constant_time_compare(shutdown_slug.encode('ascii'), shutdown_slug_candidate.encode('ascii')):
        abort(404)
    # shutdown the flask service
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
    return ""
def start(port, stay_open=False):
    # Run the flask app; threaded so progress/shutdown requests are served
    # while a download is streaming.
    set_stay_open(stay_open)
    app.run(port=port, threaded=True)
def stop(port):
    # to stop flask, load http://127.0.0.1:<port>/<shutdown_slug>/shutdown
    if helpers.get_platform() == 'Tails':
        # in Tails everything is proxies over Tor, so we need to get lower level
        # to connect not over the proxy
        import socket
        s = socket.socket()
        s.connect(('127.0.0.1', port))
        s.sendall('GET /{0}/shutdown HTTP/1.1\r\n\r\n'.format(shutdown_slug))
    else:
        urllib2.urlopen('http://127.0.0.1:{0}/{1}/shutdown'.format(port, shutdown_slug)).read()
|
gpl-3.0
|
kmad1729/website
|
django/utils/dictconfig.py
|
335
|
22939
|
# This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import logging.handlers
import re
import sys
import types
# Pattern for legal Python identifier names (case-insensitive).
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
    """Return True if *s* is a valid Python identifier; raise ValueError
    otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python
#
try:
    from logging import _checkLevel
except ImportError:
    # Fallback for Python versions whose logging module doesn't export
    # _checkLevel: normalize a level given as an int or as a level-name
    # string to its numeric value.
    def _checkLevel(level):
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper."""
    def _postprocess(self, key, value, writeback):
        # Run the raw value through the configurator; when conversion
        # produced a new object, optionally cache it back under the key and
        # attach the parent/key bookkeeping used by cfg:// resolution.
        result = self.configurator.convert(value)
        if value is not result:
            if writeback:
                self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
    def __getitem__(self, key):
        return self._postprocess(key, dict.__getitem__(self, key), True)
    def get(self, key, default=None):
        return self._postprocess(key, dict.get(self, key, default), True)
    def pop(self, key, default=None):
        # No write-back here: the key has just been removed.
        return self._postprocess(key, dict.pop(self, key, default), False)
class ConvertingList(list):
    """A converting list wrapper."""
    def __getitem__(self, key):
        raw = list.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            # Cache the converted value for subsequent lookups.
            self[key] = converted
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted
    def pop(self, idx=-1):
        raw = list.pop(self, idx)
        converted = self.configurator.convert(raw)
        if raw is not converted:
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                # Historical behavior: only .parent is set here (no .key).
                converted.parent = self
        return converted
class ConvertingTuple(tuple):
    """Tuple wrapper that lazily converts items on access.

    Tuples are immutable, so unlike ConvertingDict/ConvertingList there is
    no write-back of converted values; converting containers are still
    linked to their parent for cfg:// resolution.
    """
    def __getitem__(self, key):
        raw = tuple.__getitem__(self, key)
        converted = self.configurator.convert(raw)
        if converted is not raw:
            if type(converted) in (ConvertingDict, ConvertingList,
                                   ConvertingTuple):
                converted.parent = self
                converted.key = key
        return converted
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.

    Provides the string-resolution machinery shared by concrete
    configurators: the ``ext://`` protocol (resolve an importable object)
    and the ``cfg://`` protocol (look up a value elsewhere in the config),
    plus the ConvertingDict wrapping of the supplied configuration.
    """
    # Matches conversion strings of the form "prefix://suffix",
    # e.g. "ext://sys.stderr" or "cfg://handlers.console".
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
    # Tokenizers used by cfg_convert to walk a dotted/indexed access path.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')
    # Maps a conversion-protocol prefix to the handler method's name.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }
    # We might want to use a different one, e.g. importlib
    importer = __import__
    def __init__(self, config):
        # Wrap the raw dict so nested lookups run through convert(); the
        # wrapper needs a back-reference to this configurator to do so.
        self.config = ConvertingDict(config)
        self.config.configurator = self
    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.

        E.g. 'logging.handlers.RotatingFileHandler' imports the module
        and walks attributes; ImportError is re-raised as ValueError.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Not an attribute of what we have so far -- try
                    # importing the longer dotted path, then retry.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError, attaching the original exception
            # and traceback for debugging (Python 2 style chaining).
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v
    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)
    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Parses an access path such as 'handlers.console' or
        'loggers[root][level]' against self.config and returns the value
        found there. Raises ValueError on any unparseable remainder.
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                # Container not int-indexable; fall back
                                # to using the string form of the key.
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d
    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value
    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        The '()' key names the factory (callable or resolvable string);
        remaining valid-identifier keys become keyword arguments, and the
        optional '.' key maps attribute names to post-construction values.
        """
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result
    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """
    def configure(self):
        """Do the configuration.

        In incremental mode only levels (and, for loggers, propagate) of
        already-existing objects are adjusted; otherwise the full set of
        formatters, filters, handlers and loggers is (re)built and loggers
        absent from the new configuration are disabled.
        """
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # Hold the module-level logging lock so handler/logger mutation is
        # atomic with respect to other threads using logging.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError, e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                # Full (non-incremental) configuration: start from a clean
                # slate of handlers.
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                            formatters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError, e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = root.manager.loggerDict.keys()
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        # Collect the contiguous run of "name." children
                        # that follows name in the sorted list.
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError, e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        # Child of a configured logger: reset it so it
                        # inherits from its configured parent.
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError, e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()
    def configure_formatter(self, config):
        """Configure a formatter from a dictionary.

        Supports either a custom '()' factory or the standard
        format/datefmt keys for logging.Formatter.
        """
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError, te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result
    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result
    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError, e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))
    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Resolves the formatter and filters by name, instantiates either a
        custom '()' factory or the named handler class, and applies the
        level afterwards.
        """
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError, e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError, e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        # Only valid identifiers are forwarded as keyword arguments.
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError, te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result
    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError, e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))
    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.

        Sets the level always; handlers and filters are (re)attached only
        for a full (non-incremental) configuration.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(_checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)
    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate
    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# The configurator class used by dictConfig(); exposed so callers can
# substitute a customized subclass before invoking dictConfig().
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary."""
    configurator = dictConfigClass(config)
    configurator.configure()
|
bsd-3-clause
|
cntnboys/410Lab6
|
v1/lib/python2.7/site-packages/django/contrib/gis/tests/geo3d/tests.py
|
62
|
12075
|
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import postgis
from django.test import TestCase
from django.utils._os import upath
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
# Location of the shared GIS fixtures shipped with the geo test suite.
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
    ('Houston', (-95.363151, 29.763374, 18)),
    ('Dallas', (-96.801611, 32.782057, 147)),
    ('Oklahoma City', (-97.521157, 34.464642, 380)),
    ('Wellington', (174.783117, -41.315268, 14)),
    ('Pueblo', (-104.609252, 38.255001, 1433)),
    ('Lawrence', (-95.235060, 38.971823, 251)),
    ('Chicago', (-87.650175, 41.850385, 181)),
    ('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its coordinate tuple (the Z value is
# the altitude). city_data is already a sequence of (name, coords) pairs,
# which is exactly what the dict() constructor accepts -- the previous
# generator expression merely re-paired the same tuples.
city_dict = dict(city_data)
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
    ('I-45',
     'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
     (11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
      15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
      15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
      15.435),
     ),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
    'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,942051.75 4208366.38,941527.97 4225693.20))',
    (21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GEOS and HAS_GDAL and postgis, "Geos, GDAL and postgis are required.")
class Geo3DTest(TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:
    http://postgis.refractions.net/documentation/manual-1.4/ch08.html#PostGIS_3D_Functions
    """
    def _load_interstate_data(self):
        # Interstate (2D / 3D and Geographic/Projected variants)
        for name, line, exp_z in interstate_data:
            line_3d = GEOSGeometry(line, srid=4269)
            # Drop the Z coordinate from each vertex to build the 2D twin.
            line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
            # Creating a geographic and projected version of the
            # interstate in both 2D and 3D.
            Interstate3D.objects.create(name=name, line=line_3d)
            InterstateProj3D.objects.create(name=name, line=line_3d)
            Interstate2D.objects.create(name=name, line=line_2d)
            InterstateProj2D.objects.create(name=name, line=line_2d)
    def _load_city_data(self):
        # Each pnt_data tuple is (lon, lat, z) -- see city_data above.
        for name, pnt_data in city_data:
            City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))
    def _load_polygon_data(self):
        bbox_wkt, bbox_z = bbox_data
        bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
        # Pair each 2D ring vertex with its elevation to form the 3D ring.
        bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
        Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
        Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)
    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))
        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            z = pnt_data[2]
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)
    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)
    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}
        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())
        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)
        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())
        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())
    def test_kml(self):
        """
        Test GeoQuerySet.kml() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))
    def test_geojson(self):
        """
        Test GeoQuerySet.geojson() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))
    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = 'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        self.assertEqual(ref_union, union)
    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()
        # Compare each of the six extent values to the reference, allowing
        # for floating-point tolerance.
        def check_extent3d(extent3d, tol=6):
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)
        for e3d in [extent1, extent2]:
            check_extent3d(e3d)
    def test_perimeter(self):
        """
        Testing GeoQuerySet.perimeter() on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)
    def test_length(self):
        """
        Testing GeoQuerySet.length() on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        # FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)
    def test_scale(self):
        """
        Testing GeoQuerySet.scale() on Z values.
        """
        self._load_city_data()
        # Z scale factors to apply to each city's altitude.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
    def test_translate(self):
        """
        Testing GeoQuerySet.translate() on Z values.
        """
        self._load_city_data()
        # Z offsets to add to each city's altitude.
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
|
apache-2.0
|
mdrumond/tensorflow
|
tensorflow/contrib/slim/python/slim/data/prefetch_queue_test.py
|
76
|
8616
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
class PrefetchQueueTest(test.TestCase):
  """Unit tests for slim's prefetch_queue.

  Note: the deprecated `assertEquals` alias (used inconsistently with
  `assertEqual` in this file) has been replaced with `assertEqual`
  throughout; `assertTrue(isinstance(...))` is now `assertIsInstance`.
  """

  def testOneThread(self):
    """Single-threaded batching yields counters in order until exhaustion."""
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 5
      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')
      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=1)
      batches = prefetch_queue.prefetch_queue(batches).dequeue()
      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()
      for i in range(num_batches):
        results = sess.run(batches)
        # With a single input thread the counter values stay sequential.
        self.assertAllEqual(results[0],
                            np.arange(i * batch_size, (i + 1) * batch_size))
        self.assertEqual(results[1].shape,
                         (batch_size, image_size, image_size, 3))
        self.assertEqual(results[2].shape, (batch_size, 1))
      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join()

  def testMultiThread(self):
    """Multi-threaded batching yields every counter value exactly once."""
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 5
      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')
      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)
      batches = prefetch_queue.prefetch_queue(batches).dequeue()
      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()
      value_counter = []
      for _ in range(num_batches):
        results = sess.run(batches)
        value_counter.append(results[0])
        self.assertEqual(results[1].shape,
                         (batch_size, image_size, image_size, 3))
        self.assertEqual(results[2].shape, (batch_size, 1))
      # Order is nondeterministic across threads; sort before comparing.
      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))
      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join()

  def testMultipleDequeue(self):
    """Two dequeue ops on one prefetch queue share the stream of batches."""
    with self.test_session() as sess:
      batch_size = 10
      image_size = 32
      num_batches = 4
      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(num_batches * batch_size)
      image = random_ops.random_normal(
          [image_size, image_size, 3], dtype=dtypes.float32, name='images')
      label = random_ops.random_uniform(
          [1], 0, 10, dtype=dtypes.int32, name='labels')
      batches = input_lib.batch(
          [counter, image, label], batch_size=batch_size, num_threads=4)
      batcher = prefetch_queue.prefetch_queue(batches)
      batches_list = [batcher.dequeue() for _ in range(2)]
      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()
      value_counter = []
      for _ in range(int(num_batches / 2)):
        for batches in batches_list:
          results = sess.run(batches)
          value_counter.append(results[0])
          self.assertEqual(results[1].shape,
                           (batch_size, image_size, image_size, 3))
          self.assertEqual(results[2].shape, (batch_size, 1))
      self.assertAllEqual(
          np.sort(np.concatenate(value_counter)),
          np.arange(0, num_batches * batch_size))
      # Reached the limit.
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join()

  def testDynamicPad_failure(self):
    """Variable shapes without dynamic_pad are rejected at graph build."""
    with ops.Graph().as_default():
      variable_tensor = array_ops.placeholder(dtypes.int32, shape=[None, 3])
      with self.assertRaisesRegexp(ValueError, 'shapes must be fully defined'):
        prefetch_queue.prefetch_queue([variable_tensor])

  def testDynamicPad(self):
    """dynamic_pad pads variable-length tensors to the batch maximum."""
    with self.test_session() as sess:
      # Create 3 tensors of variable but compatible shapes.
      var_shape = [None, 2]
      p1 = constant_op.constant([[1, 2], [3, 4]])
      p1.set_shape(var_shape)
      p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
      p2.set_shape(var_shape)
      p3 = constant_op.constant([[11, 12]])
      p3.set_shape(var_shape)
      batch = [p1, p2, p3]
      batch_size = len(batch)
      zero64 = constant_op.constant(0, dtype=dtypes.int64)
      examples = variables.Variable(zero64)
      counter = examples.count_up_to(batch_size)
      # Create a PaddingFIFOQueue to enqueue these tensors.
      q = data_flow_ops.PaddingFIFOQueue(
          capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
      for tensor in [p1, p2, p3]:
        q.enqueue([tensor]).run()
      # Dequeue from the queue and batch them using batch().
      batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
                                num_threads=1, dynamic_pad=True)
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
      # Finally, assemble them into prefetch_queue with dynamic_pad.
      batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
      batches = batcher.dequeue()
      self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
      variables.global_variables_initializer().run()
      threads = queue_runner_impl.start_queue_runners()
      values, _ = sess.run(batches)
      # We enqueued 3 tensors of [None, 2] shapes, so using dynamic_pad
      # they should be padded to the fixed size [3, 3, 2], where 3
      # is the maximum length of the batch.
      self.assertTrue(np.array_equal(
          np.array([[[1, 2], [3, 4], [0, 0]],
                    [[5, 6], [7, 8], [9, 10]],
                    [[11, 12], [0, 0], [0, 0]]]),
          values))
      with self.assertRaises(errors_impl.OutOfRangeError):
        sess.run(batches)
      for thread in threads:
        thread.join()

  def testDictConstruction(self):
    """A dict of tensors round-trips through the prefetch queue as a dict."""
    with ops.Graph().as_default():
      batches = {
          'first': constant_op.constant([1]),
          'second': constant_op.constant([2.0, 2.1])
      }
      prefetcher = prefetch_queue.prefetch_queue(batches)
      dequeued = prefetcher.dequeue()
      self.assertIsInstance(dequeued, dict)
      self.assertEqual(2, len(dequeued))
      self.assertEqual(dtypes.int32, dequeued['first'].dtype)
      self.assertEqual(dtypes.float32, dequeued['second'].dtype)
# Standard entry point so the module can be run directly as a test binary.
if __name__ == '__main__':
  test.main()
|
apache-2.0
|
vpramo/contrail-sandesh
|
library/python/pysandesh/example/pysandesh_example/generator.py
|
6
|
4239
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import gevent
from gevent import monkey; monkey.patch_all()
from pysandesh.sandesh_base import *
from gen_py.generator_msg.ttypes import *
from pysandesh_example.gen_py.vn.ttypes import *
from pysandesh_example.gen_py.vm.ttypes import *
import sandesh_req_impl
import socket
class generator(object):
    def __init__(self):
        # One private Sandesh instance per generator; all messages below
        # are sent through it rather than a library-level default.
        self._sandesh_instance = Sandesh()
        # Install the example's handlers for incoming sandesh requests.
        sandesh_req_impl.bind_handle_request_impl()
    #end __init__
    def run_generator(self):
        """Initialize the sandesh connection and run the UVE send loop.

        Blocks forever: joinall() waits on _send_uve_sandesh, which loops
        without returning, so the final print is never reached.
        """
        self._sandesh_instance.init_generator('PysandeshExample', socket.gethostname(),
            'Example', '0', ['127.0.0.1:8086'], 'generator_context', 9090,
            ['pysandesh_example'])
        self._sandesh_instance.set_logging_params(enable_local_log = True,
            level = SandeshLevel.SYS_EMERG)
        self._sandesh_instance.trace_buffer_create(name = "PysandeshTraceBuf",
            size = 1000)
        send_uve_task = gevent.spawn(self._send_uve_sandesh)
        gevent.joinall([send_uve_task])
        # NOTE(review): unreachable in practice (see docstring); the message
        # also contains a typo ("shoud") -- left unchanged here.
        print 'We shoud not see this message!'
    #end run_generator
def _send_sandesh(self):
while True:
gen_msg = GeneratorMsg(sandesh=self._sandesh_instance)
gen_msg.send(sandesh=self._sandesh_instance)
m1 = msg1(1, 2, 3, "msg1_string1", 10, sandesh=self._sandesh_instance)
m1.send(sandesh=self._sandesh_instance)
m4 = msg4(name='msg1', sandesh=self._sandesh_instance)
m4.trace_msg(name='PysandeshTraceBuf',
sandesh=self._sandesh_instance)
st11 = struct1(10, "struct1_string1")
st12 = struct1(20, "struct1_string2")
m2 = msg2(st11, st12, sandesh=self._sandesh_instance)
m2.send(sandesh=self._sandesh_instance)
m4 = msg4(name='msg2', sandesh=self._sandesh_instance)
m4.trace_msg(name='PysandeshTraceBuf',
sandesh=self._sandesh_instance)
s_list = ['str1', 'str2', 'str3']
i_list = [11, 22]
stl1 = []
stl1.append(st11)
stl1.append(st12)
st21 = struct2(stl1, 44)
m3 = msg3(s_list, i_list, st21, sandesh=self._sandesh_instance)
m3.send(sandesh=self._sandesh_instance)
m4 = msg4(name='msg3', sandesh=self._sandesh_instance)
m4.trace_msg(name='PysandeshTraceBuf',
sandesh=self._sandesh_instance)
gevent.sleep(20)
#end _send_sandesh
def _send_uve_sandesh(self):
count = 0
while True:
count += 1
vn_cfg = UveVirtualNetworkConfig()
vn_cfg.name = 'sandesh-corp:vn45'
vn_cfg.total_interfaces = count*2
vn_cfg.total_virtual_machines = count
uve_cfg_vn = UveVirtualNetworkConfigTrace(data=vn_cfg,
sandesh=self._sandesh_instance)
uve_cfg_vn.send(sandesh=self._sandesh_instance)
m4 = msg4(name='UveVirtualNetworkConfigTrace', sandesh=self._sandesh_instance)
m4.trace_msg(name='PysandeshTraceBuf',
sandesh=self._sandesh_instance)
vm_if = VmInterfaceAgent()
vm_if.name = 'vhost0'
vm_if.in_pkts = count;
vm_if.in_bytes = count*10;
vm_agent = UveVirtualMachineAgent()
vm_agent.name = 'sandesh-corp:vm-dns'
vm_agent.interface_list = []
vm_agent.interface_list.append(vm_if)
uve_agent_vm = UveVirtualMachineAgentTrace(data=vm_agent,
sandesh=self._sandesh_instance)
uve_agent_vm.send(sandesh=self._sandesh_instance)
m4 = msg4(name='UveVirtualMachineAgentTrace', sandesh=self._sandesh_instance)
m4.trace_msg(name='PysandeshTraceBuf',
sandesh=self._sandesh_instance)
gevent.sleep(10)
#end _send_uve_sandesh
#end class generator
gen = generator()
gen.run_generator()
|
apache-2.0
|
vitan/hue
|
desktop/core/ext-py/Pygments-1.3.1/pygments/cmdline.py
|
75
|
13055
|
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=')
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
    """Parse a list of ``name:options`` filter specifications.

    Returns a list of ``(filter_name, options_dict)`` pairs; specs without
    a colon get an empty options dict.  A falsy *f_strs* yields ``[]``.
    """
    filters = []
    for f_str in f_strs or ():
        name, sep, opt_str = f_str.partition(':')
        if sep:
            # everything after the first colon is an option string
            filters.append((name, _parse_options([opt_str])))
        else:
            filters.append((f_str, {}))
    return filters
def _print_help(what, name):
    """Print the docstring-based help for the object *name* of type *what*.

    *what* is one of 'lexer', 'formatter' or 'filter'.  Unknown names are
    reported on stderr.
    """
    try:
        if what == 'lexer':
            cls = find_lexer_class(name)
            print "Help on the %s lexer:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print "Help on the %s formatter:" % cls.name
            print dedent(cls.__doc__)
        elif what == 'filter':
            cls = find_filter_class(name)
            print "Help on the %s filter:" % name
            print dedent(cls.__doc__)
    except AttributeError:
        # The find_* helpers return None for unknown names; accessing
        # .name/__doc__ then raises AttributeError, reported here.
        print >>sys.stderr, "%s not found!" % what
def _print_list(what):
    """List all known objects of one category on stdout.

    *what* is the singular category name: 'lexer', 'formatter', 'filter'
    or 'style'.  Lexers and formatters are sorted by alias.
    """
    if what == 'lexer':
        print
        print "Lexers:"
        print "~~~~~~~"
        info = []
        for fullname, names, exts, _ in get_all_lexers():
            tup = (', '.join(names)+':', fullname,
                   exts and '(filenames ' + ', '.join(exts) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n %s %s') % i

    elif what == 'formatter':
        print
        print "Formatters:"
        print "~~~~~~~~~~~"
        info = []
        for cls in get_all_formatters():
            doc = docstring_headline(cls)
            tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
                   '(filenames ' + ', '.join(cls.filenames) + ')' or '')
            info.append(tup)
        info.sort()
        for i in info:
            print ('* %s\n %s %s') % i

    elif what == 'filter':
        print
        print "Filters:"
        print "~~~~~~~~"
        for name in get_all_filters():
            cls = find_filter_class(name)
            print "* " + name + ':'
            print " %s" % docstring_headline(cls)

    elif what == 'style':
        print
        print "Styles:"
        print "~~~~~~~"
        for name in get_all_styles():
            cls = get_style_by_name(name)
            print "* " + name + ':'
            print " %s" % docstring_headline(cls)
def main(args=sys.argv):
    """
    Main command line entry point.

    Handles the informational options (-h, -V, -L, -H, -N, -S) first, then
    selects a lexer and formatter and highlights the input.  Returns a
    shell exit code: 0 on success, 1 on runtime errors, 2 on usage errors.
    """
    # pylint: disable-msg=R0911,R0912,R0915

    usage = USAGE % ((args[0],) * 6)

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
    except getopt.GetoptError, err:
        # unknown option or missing argument
        print >>sys.stderr, usage
        return 2
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    # Collect the repeatable options (-O, -P, -F) into lists; the last
    # occurrence of every option also lands in the opts dict.
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if not opts and not args:
        print usage
        return 0

    if opts.pop('-h', None) is not None:
        print usage
        return 0

    if opts.pop('-V', None) is not None:
        print 'Pygments version %s, (c) 2006-2008 by Georg Brandl.' % __version__
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print >>sys.stderr, usage
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print >>sys.stderr, usage
            return 2

        what, name = args
        if what not in ('lexer', 'formatter', 'filter'):
            print >>sys.stderr, usage
            return 2

        _print_help(what, name)
        return 0

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options (one option per flag, so values may contain , and =)
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # handle ``pygmentize -N``: only guess and print the lexer name
    infn = opts.pop('-N', None)
    if infn is not None:
        try:
            lexer = get_lexer_for_filename(infn, **parsed_opts)
        except ClassNotFound, err:
            lexer = TextLexer()
        except OptionError, err:
            print >>sys.stderr, 'Error:', err
            return 1

        print lexer.aliases[0]
        return 0

    # handle ``pygmentize -S``: print style definitions and exit
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print >>sys.stderr, usage
            return 2
        if opts or args:
            print >>sys.stderr, usage
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound, err:
            print >>sys.stderr, err
            return 1

        arg = a_opt or ''
        try:
            print fmter.get_style_defs(arg)
        except Exception, err:
            print >>sys.stderr, 'Error:', err
            return 1
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print >>sys.stderr, usage
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    # select formatter: explicit -f first, else guessed from the output
    # filename, else the terminal formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        try:
            fmter = get_formatter_by_name(fmter, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound), err:
                print >>sys.stderr, 'Error:', err
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception, err:
            print >>sys.stderr, 'Error: cannot open outfile:', err
            return 1
    else:
        if not fmter:
            fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout

    # select lexer: explicit -l first, else guessed from the input filename,
    # else (with -g) guessed from the content
    lexer = opts.pop('-l', None)
    if lexer:
        try:
            lexer = get_lexer_by_name(lexer, **parsed_opts)
        except (OptionError, ClassNotFound), err:
            print >>sys.stderr, 'Error:', err
            return 1

    if args:
        if len(args) > 1:
            print >>sys.stderr, usage
            return 2

        infn = args[0]
        try:
            code = open(infn, 'rb').read()
        except Exception, err:
            print >>sys.stderr, 'Error: cannot read infile:', err
            return 1

        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound, err:
                if '-g' in opts:
                    try:
                        lexer = guess_lexer(code)
                    except ClassNotFound:
                        lexer = TextLexer()
                else:
                    print >>sys.stderr, 'Error:', err
                    return 1
            except OptionError, err:
                print >>sys.stderr, 'Error:', err
                return 1

    else:
        # reading from stdin
        if '-g' in opts:
            code = sys.stdin.read()
            try:
                lexer = guess_lexer(code)
            except ClassNotFound:
                lexer = TextLexer()
        elif not lexer:
            print >>sys.stderr, 'Error: no lexer name given and reading ' + \
                                'from stdin (try using -g or -l <lexer>)'
            return 2
        else:
            code = sys.stdin.read()

    # No encoding given? Use latin1 if output file given,
    # stdin/stdout encoding otherwise.
    # (This is a compromise, I'm not too happy with it...)
    if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
        if outfn:
            # encoding pass-through
            fmter.encoding = 'latin1'
        else:
            if sys.version_info < (3,):
                # use terminal encoding; Python 3's terminals already do that
                lexer.encoding = getattr(sys.stdin, 'encoding',
                                         None) or 'ascii'
                fmter.encoding = getattr(sys.stdout, 'encoding',
                                         None) or 'ascii'

    # ... and do it!
    try:
        # process filters
        for fname, fopts in F_opts:
            lexer.add_filter(fname, **fopts)
        highlight(code, lexer, fmter, outfile)
    except Exception, err:
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print >>sys.stderr
        print >>sys.stderr, '*** Error while highlighting:'
        print >>sys.stderr, msg
        return 1
    return 0
|
apache-2.0
|
torchingloom/edx-platform
|
cms/djangoapps/contentstore/features/course-settings.py
|
21
|
7212
|
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from terrain.steps import reload_the_page
from selenium.webdriver.common.keys import Keys
from common import type_in_codemirror, upload_file
from django.conf import settings
from nose.tools import assert_true, assert_false, assert_equal # pylint: disable=E0611
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
COURSE_START_DATE_CSS = "#course-start-date"
COURSE_END_DATE_CSS = "#course-end-date"
ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date"
ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date"
COURSE_START_TIME_CSS = "#course-start-time"
COURSE_END_TIME_CSS = "#course-end-time"
ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time"
ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time"
DUMMY_TIME = "15:30"
DEFAULT_TIME = "00:00"
############### ACTIONS ####################
@step('I select Schedule and Details$')
def test_i_select_schedule_and_details(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-schedule a'
world.css_click(link_css)
world.wait_for_requirejs(
["jquery", "js/models/course",
"js/models/settings/course_details", "js/views/settings/main"])
@step('I have set course dates$')
def test_i_have_set_course_dates(step):
step.given('I have opened a new course in Studio')
step.given('I select Schedule and Details')
step.given('And I set course dates')
@step('And I set course dates$')
def test_and_i_set_course_dates(step):
set_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
set_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '12/1/2013')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
set_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
set_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)
@step('And I clear all the dates except start$')
def test_and_i_clear_all_the_dates_except_start(step):
set_date_or_time(COURSE_END_DATE_CSS, '')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '')
@step('Then I see cleared dates$')
def test_then_i_see_cleared_dates(step):
verify_date_or_time(COURSE_END_DATE_CSS, '')
verify_date_or_time(ENROLLMENT_START_DATE_CSS, '')
verify_date_or_time(ENROLLMENT_END_DATE_CSS, '')
verify_date_or_time(COURSE_END_TIME_CSS, '')
verify_date_or_time(ENROLLMENT_START_TIME_CSS, '')
verify_date_or_time(ENROLLMENT_END_TIME_CSS, '')
# Verify course start date (required) and time still there
verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I clear the course start date$')
def test_i_clear_the_course_start_date(step):
set_date_or_time(COURSE_START_DATE_CSS, '')
@step('I receive a warning about course start date$')
def test_i_receive_a_warning_about_course_start_date(step):
assert_true(world.css_has_text('.message-error', 'The course must have an assigned start date.'))
assert_true('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
assert_true('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))
@step('the previously set start date is shown$')
def test_the_previously_set_start_date_is_shown(step):
verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('Given I have tried to clear the course start$')
def test_i_have_tried_to_clear_the_course_start(step):
step.given("I have set course dates")
step.given("I clear the course start date")
step.given("I receive a warning about course start date")
@step('I have entered a new course start date$')
def test_i_have_entered_a_new_course_start_date(step):
set_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')
@step('The warning about course start date goes away$')
def test_the_warning_about_course_start_date_goes_away(step):
assert world.is_css_not_present('.message-error')
assert_false('error' in world.css_find(COURSE_START_DATE_CSS).first._element.get_attribute('class'))
assert_false('error' in world.css_find(COURSE_START_TIME_CSS).first._element.get_attribute('class'))
@step('my new course start date is shown$')
def new_course_start_date_is_shown(step):
verify_date_or_time(COURSE_START_DATE_CSS, '12/22/2013')
# Time should have stayed from before attempt to clear date.
verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
@step('I change fields$')
def test_i_change_fields(step):
set_date_or_time(COURSE_START_DATE_CSS, '7/7/7777')
set_date_or_time(COURSE_END_DATE_CSS, '7/7/7777')
set_date_or_time(ENROLLMENT_START_DATE_CSS, '7/7/7777')
set_date_or_time(ENROLLMENT_END_DATE_CSS, '7/7/7777')
@step('I change the course overview')
def test_change_course_overview(_step):
type_in_codemirror(0, "<h1>Overview</h1>")
@step('I click the "Upload Course Image" button')
def click_upload_button(_step):
button_css = '.action-upload-image'
world.css_click(button_css)
@step('I upload a new course image$')
def upload_new_course_image(_step):
upload_file('image.jpg')
@step('I should see the new course image$')
def i_see_new_course_image(_step):
img_css = '#course-image'
images = world.css_find(img_css)
assert len(images) == 1
img = images[0]
expected_src = '/c4x/MITx/999/asset/image.jpg'
# Don't worry about the domain in the URL
success_func = lambda _: img['src'].endswith(expected_src)
world.wait_for(success_func)
@step('the image URL should be present in the field')
def image_url_present(_step):
field_css = '#course-image-url'
expected_value = '/c4x/MITx/999/asset/image.jpg'
assert world.css_value(field_css) == expected_value
############### HELPER METHODS ####################
def set_date_or_time(css, date_or_time):
    """
    Sets date or time field.

    `css` locates the input element; `date_or_time` is the text to type
    (an empty string clears the field).
    """
    world.css_fill(css, date_or_time)
    e = world.css_find(css).first
    # hit Enter to apply the changes
    e._element.send_keys(Keys.ENTER)
def verify_date_or_time(css, date_or_time):
    """
    Verifies date or time field.

    Asserts that the element located by `css` eventually holds the value
    `date_or_time`.
    """
    # We need to wait for JavaScript to fill in the field, so we use
    # css_has_value(), which first checks that the field is not blank
    assert_true(world.css_has_value(css, date_or_time))
@step('I do not see the changes')
@step('I see the set dates')
def i_see_the_set_dates(_step):
    """
    Ensure that each field has the value set in `test_and_i_set_course_dates`.
    """
    verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')
    verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')
    # Dates entered as e.g. 12/1/2013 are redisplayed zero-padded (12/01).
    verify_date_or_time(ENROLLMENT_START_DATE_CSS, '12/01/2013')
    verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')
    verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)
    # Unset times get set to 12 AM once the corresponding date has been set.
    verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)
    verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)
    verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)
|
agpl-3.0
|
xwolf12/django
|
django/contrib/admin/checks.py
|
18
|
38392
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
def check_admin_app(**kwargs):
    """Surface errors collected during admin registration to the check framework."""
    # Imported lazily: system_check_errors is populated as ModelAdmins are
    # registered with the admin site.
    from django.contrib.admin.sites import system_check_errors
    return system_check_errors
class BaseModelAdminChecks(object):
    def check(self, cls, model, **kwargs):
        """Run every admin-option check for `cls` against `model`.

        Returns the accumulated list of checks.Error instances (empty when
        the ModelAdmin configuration is valid).
        """
        errors = []
        errors.extend(self._check_raw_id_fields(cls, model))
        errors.extend(self._check_fields(cls, model))
        errors.extend(self._check_fieldsets(cls, model))
        errors.extend(self._check_exclude(cls, model))
        errors.extend(self._check_form(cls, model))
        errors.extend(self._check_filter_vertical(cls, model))
        errors.extend(self._check_filter_horizontal(cls, model))
        errors.extend(self._check_radio_fields(cls, model))
        errors.extend(self._check_prepopulated_fields(cls, model))
        errors.extend(self._check_view_on_site_url(cls, model))
        errors.extend(self._check_ordering(cls, model))
        errors.extend(self._check_readonly_fields(cls, model))
        return errors
    def _check_raw_id_fields(self, cls, model):
        """ Check that `raw_id_fields` only contains field names that are listed
        on the model. """
        if not isinstance(cls.raw_id_fields, (list, tuple)):
            return must_be('a list or tuple', option='raw_id_fields', obj=cls, id='admin.E001')
        else:
            # Validate each entry individually; chain flattens the per-item
            # error lists into a single list.
            return list(chain(*[
                self._check_raw_id_fields_item(cls, model, field_name, 'raw_id_fields[%d]' % index)
                for index, field_name in enumerate(cls.raw_id_fields)
            ]))
def _check_raw_id_fields_item(self, cls, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E002')
else:
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
return must_be('a ForeignKey or ManyToManyField',
option=label, obj=cls, id='admin.E003')
else:
return []
def _check_fields(self, cls, model):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if cls.fields is None:
return []
elif not isinstance(cls.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=cls, id='admin.E004')
elif cls.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
hint=None,
obj=cls,
id='admin.E005',
)
]
fields = flatten(cls.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=cls,
id='admin.E006',
)
]
return list(chain(*[
self._check_field_spec(cls, model, field_name, 'fields')
for field_name in cls.fields
]))
def _check_fieldsets(self, cls, model):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if cls.fieldsets is None:
return []
elif not isinstance(cls.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=cls, id='admin.E007')
else:
return list(chain(*[
self._check_fieldsets_item(cls, model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(cls.fieldsets)
]))
def _check_fieldsets_item(self, cls, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=cls, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=cls, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=cls, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
hint=None,
obj=cls,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=cls, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
hint=None,
obj=cls,
id='admin.E012',
)
]
return list(chain(*[
self._check_field_spec(cls, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
]))
def _check_field_spec(self, cls, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain(*[
self._check_field_spec_item(cls, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
]))
else:
return self._check_field_spec_item(cls, model, fields, label)
def _check_field_spec_item(self, cls, model, field_name, label):
if field_name in cls.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields, readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
("The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model.")
% (label, field_name),
hint=None,
obj=cls,
id='admin.E013',
)
]
else:
return []
    def _check_exclude(self, cls, model):
        """ Check that exclude is a sequence without duplicates. """
        if cls.exclude is None:  # default value is None
            return []
        elif not isinstance(cls.exclude, (list, tuple)):
            return must_be('a list or tuple', option='exclude', obj=cls, id='admin.E014')
        elif len(cls.exclude) > len(set(cls.exclude)):
            # set() collapses duplicates, so a length mismatch means at
            # least one field name is listed twice.
            return [
                checks.Error(
                    "The value of 'exclude' contains duplicate field(s).",
                    hint=None,
                    obj=cls,
                    id='admin.E015',
                )
            ]
        else:
            return []
def _check_form(self, cls, model):
""" Check that form subclasses BaseModelForm. """
if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=cls, id='admin.E016')
else:
return []
def _check_filter_vertical(self, cls, model):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(cls, 'filter_vertical'):
return []
elif not isinstance(cls.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=cls, id='admin.E017')
else:
return list(chain(*[
self._check_filter_item(cls, model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(cls.filter_vertical)
]))
def _check_filter_horizontal(self, cls, model):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(cls, 'filter_horizontal'):
return []
elif not isinstance(cls.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=cls, id='admin.E018')
else:
return list(chain(*[
self._check_filter_item(cls, model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(cls.filter_horizontal)
]))
def _check_filter_item(self, cls, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E019')
else:
if not isinstance(field, models.ManyToManyField):
return must_be('a ManyToManyField', option=label, obj=cls, id='admin.E020')
else:
return []
def _check_radio_fields(self, cls, model):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(cls, 'radio_fields'):
return []
elif not isinstance(cls.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=cls, id='admin.E021')
else:
return list(chain(*[
self._check_radio_fields_key(cls, model, field_name, 'radio_fields') +
self._check_radio_fields_value(cls, model, val, 'radio_fields["%s"]' % field_name)
for field_name, val in cls.radio_fields.items()
]))
def _check_radio_fields_key(self, cls, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
hint=None,
obj=cls,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, cls, model, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
hint=None,
obj=cls,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, cls, model):
if hasattr(cls, 'view_on_site'):
if not callable(cls.view_on_site) and not isinstance(cls.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=cls,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, cls, model):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(cls, 'prepopulated_fields'):
return []
elif not isinstance(cls.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=cls, id='admin.E026')
else:
return list(chain(*[
self._check_prepopulated_fields_key(cls, model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(cls, model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in cls.prepopulated_fields.items()
]))
def _check_prepopulated_fields_key(self, cls, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
forbidden_field_types = (
models.DateTimeField,
models.ForeignKey,
models.ManyToManyField
)
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E027')
else:
if isinstance(field, forbidden_field_types):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"ForeignKey or ManyToManyField." % (
label, field_name
),
hint=None,
obj=cls,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, cls, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=cls, id='admin.E029')
else:
return list(chain(*[
self._check_prepopulated_fields_value_item(cls, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
]))
def _check_prepopulated_fields_value_item(self, cls, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E030')
else:
return []
def _check_ordering(self, cls, model):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if cls.ordering is None: # The default value is None
return []
elif not isinstance(cls.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=cls, id='admin.E031')
else:
return list(chain(*[
self._check_ordering_item(cls, model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(cls.ordering)
]))
def _check_ordering_item(self, cls, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(cls.ordering) != 1:
return [
checks.Error(
("The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well."),
hint='Either remove the "?", or remove the other fields.',
obj=cls,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif '__' in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=cls, id='admin.E033')
else:
return []
def _check_readonly_fields(self, cls, model):
""" Check that readonly_fields refers to proper attribute or field. """
if cls.readonly_fields == ():
return []
elif not isinstance(cls.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=cls, id='admin.E034')
else:
return list(chain(*[
self._check_readonly_fields_item(cls, model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(cls.readonly_fields)
]))
def _check_readonly_fields_item(self, cls, model, field_name, label):
if callable(field_name):
return []
elif hasattr(cls, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, cls.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=cls,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
    """Checks for options specific to ModelAdmin (list_display, list_filter,
    search_fields, date_hierarchy, ...) on top of the base admin checks.
    Each _check_* method returns a (possibly empty) list of checks.Error."""
    def check(self, cls, model, **kwargs):
        """Run the base checks, then every ModelAdmin-specific check."""
        errors = super(ModelAdminChecks, self).check(cls, model=model, **kwargs)
        errors.extend(self._check_save_as(cls, model))
        errors.extend(self._check_save_on_top(cls, model))
        errors.extend(self._check_inlines(cls, model))
        errors.extend(self._check_list_display(cls, model))
        errors.extend(self._check_list_display_links(cls, model))
        errors.extend(self._check_list_filter(cls, model))
        errors.extend(self._check_list_select_related(cls, model))
        errors.extend(self._check_list_per_page(cls, model))
        errors.extend(self._check_list_max_show_all(cls, model))
        errors.extend(self._check_list_editable(cls, model))
        errors.extend(self._check_search_fields(cls, model))
        errors.extend(self._check_date_hierarchy(cls, model))
        return errors
    def _check_save_as(self, cls, model):
        """ Check save_as is a boolean. """
        if not isinstance(cls.save_as, bool):
            return must_be('a boolean', option='save_as',
                           obj=cls, id='admin.E101')
        else:
            return []
    def _check_save_on_top(self, cls, model):
        """ Check save_on_top is a boolean. """
        if not isinstance(cls.save_on_top, bool):
            return must_be('a boolean', option='save_on_top',
                           obj=cls, id='admin.E102')
        else:
            return []
    def _check_inlines(self, cls, model):
        """ Check all inline model admin classes. """
        if not isinstance(cls.inlines, (list, tuple)):
            return must_be('a list or tuple', option='inlines', obj=cls, id='admin.E103')
        else:
            return list(chain(*[
                self._check_inlines_item(cls, model, item, "inlines[%d]" % index)
                for index, item in enumerate(cls.inlines)
            ]))
    def _check_inlines_item(self, cls, model, inline, label):
        """ Check one inline model admin. """
        inline_label = '.'.join([inline.__module__, inline.__name__])
        from django.contrib.admin.options import BaseModelAdmin
        if not issubclass(inline, BaseModelAdmin):
            return [
                checks.Error(
                    "'%s' must inherit from 'BaseModelAdmin'." % inline_label,
                    hint=None,
                    obj=cls,
                    id='admin.E104',
                )
            ]
        elif not inline.model:
            return [
                checks.Error(
                    "'%s' must have a 'model' attribute." % inline_label,
                    hint=None,
                    obj=cls,
                    id='admin.E105',
                )
            ]
        elif not issubclass(inline.model, models.Model):
            return must_be('a Model', option='%s.model' % inline_label,
                           obj=cls, id='admin.E106')
        else:
            # Inline looks sane; recurse into its own checks.
            return inline.check(model)
    def _check_list_display(self, cls, model):
        """ Check that list_display only contains fields or usable attributes.
        """
        if not isinstance(cls.list_display, (list, tuple)):
            return must_be('a list or tuple', option='list_display', obj=cls, id='admin.E107')
        else:
            return list(chain(*[
                self._check_list_display_item(cls, model, item, "list_display[%d]" % index)
                for index, item in enumerate(cls.list_display)
            ]))
    def _check_list_display_item(self, cls, model, item, label):
        """ One list_display entry must be a callable, an admin attribute,
        or a (non-M2M) model field/attribute. """
        if callable(item):
            return []
        elif hasattr(cls, item):
            return []
        elif hasattr(model, item):
            # getattr(model, item) could be an X_RelatedObjectsDescriptor
            try:
                field = model._meta.get_field(item)
            except FieldDoesNotExist:
                try:
                    field = getattr(model, item)
                except AttributeError:
                    field = None
            if field is None:
                return [
                    checks.Error(
                        "The value of '%s' refers to '%s', which is not a "
                        "callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
                            label, item, cls.__name__, model._meta.app_label, model._meta.object_name
                        ),
                        hint=None,
                        obj=cls,
                        id='admin.E108',
                    )
                ]
            elif isinstance(field, models.ManyToManyField):
                return [
                    checks.Error(
                        "The value of '%s' must not be a ManyToManyField." % label,
                        hint=None,
                        obj=cls,
                        id='admin.E109',
                    )
                ]
            else:
                return []
        else:
            try:
                model._meta.get_field(item)
            except FieldDoesNotExist:
                return [
                    # This is a deliberate repeat of E108; there's more than one path
                    # required to test this condition.
                    checks.Error(
                        "The value of '%s' refers to '%s', which is not a callable, "
                        "an attribute of '%s', or an attribute or method on '%s.%s'." % (
                            label, item, cls.__name__, model._meta.app_label, model._meta.object_name
                        ),
                        hint=None,
                        obj=cls,
                        id='admin.E108',
                    )
                ]
            else:
                return []
    def _check_list_display_links(self, cls, model):
        """ Check that list_display_links is a unique subset of list_display.
        """
        if cls.list_display_links is None:
            return []
        elif not isinstance(cls.list_display_links, (list, tuple)):
            return must_be('a list, a tuple, or None', option='list_display_links', obj=cls, id='admin.E110')
        else:
            return list(chain(*[
                self._check_list_display_links_item(cls, model, field_name, "list_display_links[%d]" % index)
                for index, field_name in enumerate(cls.list_display_links)
            ]))
    def _check_list_display_links_item(self, cls, model, field_name, label):
        """ Every link field must also appear in list_display. """
        if field_name not in cls.list_display:
            return [
                checks.Error(
                    "The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
                        label, field_name
                    ),
                    hint=None,
                    obj=cls,
                    id='admin.E111',
                )
            ]
        else:
            return []
    def _check_list_filter(self, cls, model):
        """ Check list_filter is a sequence of valid filter specs. """
        if not isinstance(cls.list_filter, (list, tuple)):
            return must_be('a list or tuple', option='list_filter', obj=cls, id='admin.E112')
        else:
            return list(chain(*[
                self._check_list_filter_item(cls, model, item, "list_filter[%d]" % index)
                for index, item in enumerate(cls.list_filter)
            ]))
    def _check_list_filter_item(self, cls, model, item, label):
        """
        Check one item of `list_filter`, i.e. check if it is one of three options:
        1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
        'field__rel')
        2. ('field', SomeFieldListFilter) - a field-based list filter class
        3. SomeListFilter - a non-field list filter class
        """
        from django.contrib.admin import ListFilter, FieldListFilter
        if callable(item) and not isinstance(item, models.Field):
            # If item is option 3, it should be a ListFilter...
            if not issubclass(item, ListFilter):
                return must_inherit_from(parent='ListFilter', option=label,
                                         obj=cls, id='admin.E113')
            # ... but not a FieldListFilter.
            elif issubclass(item, FieldListFilter):
                return [
                    checks.Error(
                        "The value of '%s' must not inherit from 'FieldListFilter'." % label,
                        hint=None,
                        obj=cls,
                        id='admin.E114',
                    )
                ]
            else:
                return []
        elif isinstance(item, (tuple, list)):
            # item is option #2
            field, list_filter_class = item
            if not issubclass(list_filter_class, FieldListFilter):
                return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label,
                                         obj=cls, id='admin.E115')
            else:
                return []
        else:
            # item is option #1
            field = item
            # Validate the field string
            try:
                get_fields_from_path(model, field)
            except (NotRelationField, FieldDoesNotExist):
                return [
                    checks.Error(
                        "The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
                        hint=None,
                        obj=cls,
                        id='admin.E116',
                    )
                ]
            else:
                return []
    def _check_list_select_related(self, cls, model):
        """ Check that list_select_related is a boolean, a list or a tuple. """
        if not isinstance(cls.list_select_related, (bool, list, tuple)):
            return must_be('a boolean, tuple or list', option='list_select_related',
                           obj=cls, id='admin.E117')
        else:
            return []
    def _check_list_per_page(self, cls, model):
        """ Check that list_per_page is an integer. """
        if not isinstance(cls.list_per_page, int):
            return must_be('an integer', option='list_per_page', obj=cls, id='admin.E118')
        else:
            return []
    def _check_list_max_show_all(self, cls, model):
        """ Check that list_max_show_all is an integer. """
        if not isinstance(cls.list_max_show_all, int):
            return must_be('an integer', option='list_max_show_all', obj=cls, id='admin.E119')
        else:
            return []
    def _check_list_editable(self, cls, model):
        """ Check that list_editable is a sequence of editable fields from
        list_display without first element. """
        if not isinstance(cls.list_editable, (list, tuple)):
            return must_be('a list or tuple', option='list_editable', obj=cls, id='admin.E120')
        else:
            return list(chain(*[
                self._check_list_editable_item(cls, model, item, "list_editable[%d]" % index)
                for index, item in enumerate(cls.list_editable)
            ]))
    def _check_list_editable_item(self, cls, model, field_name, label):
        """ One list_editable entry must be an editable model field that is
        in list_display but not in list_display_links. """
        try:
            field = model._meta.get_field(field_name)
        except FieldDoesNotExist:
            return refer_to_missing_field(field=field_name, option=label,
                                          model=model, obj=cls, id='admin.E121')
        else:
            # NOTE(review): E122 reuses the "missing field" wording even
            # though the actual problem is "not in list_display" — confirm
            # this matches the intended message.
            if field_name not in cls.list_display:
                return refer_to_missing_field(field=field_name, option=label,
                                              model=model, obj=cls, id='admin.E122')
            elif cls.list_display_links and field_name in cls.list_display_links:
                return [
                    checks.Error(
                        "The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
                        hint=None,
                        obj=cls,
                        id='admin.E123',
                    )
                ]
            # Check that list_display_links is set, and that the first values of list_editable and list_display are
            # not the same. See ticket #22792 for the use case relating to this.
            elif (cls.list_display[0] in cls.list_editable and cls.list_display[0] != cls.list_editable[0] and
                    cls.list_display_links is not None):
                return [
                    checks.Error(
                        "The value of '%s' refers to the first field in 'list_display' ('%s'), "
                        "which cannot be used unless 'list_display_links' is set." % (
                            label, cls.list_display[0]
                        ),
                        hint=None,
                        obj=cls,
                        id='admin.E124',
                    )
                ]
            elif not field.editable:
                return [
                    checks.Error(
                        "The value of '%s' refers to '%s', which is not editable through the admin." % (
                            label, field_name
                        ),
                        hint=None,
                        obj=cls,
                        id='admin.E125',
                    )
                ]
            else:
                return []
    def _check_search_fields(self, cls, model):
        """ Check search_fields is a sequence. """
        if not isinstance(cls.search_fields, (list, tuple)):
            return must_be('a list or tuple', option='search_fields', obj=cls, id='admin.E126')
        else:
            return []
    def _check_date_hierarchy(self, cls, model):
        """ Check that date_hierarchy refers to DateField or DateTimeField. """
        if cls.date_hierarchy is None:
            return []
        else:
            try:
                field = model._meta.get_field(cls.date_hierarchy)
            except FieldDoesNotExist:
                return refer_to_missing_field(option='date_hierarchy',
                                              field=cls.date_hierarchy,
                                              model=model, obj=cls, id='admin.E127')
            else:
                if not isinstance(field, (models.DateField, models.DateTimeField)):
                    return must_be('a DateField or DateTimeField', option='date_hierarchy',
                                   obj=cls, id='admin.E128')
                else:
                    return []
class InlineModelAdminChecks(BaseModelAdminChecks):
    """Checks for InlineModelAdmin classes: the base checks are run against
    the inline's own model, plus checks for the relation to the parent
    model and the formset-related numeric options."""
    def check(self, cls, parent_model, **kwargs):
        """Run base checks on the inline's model, then inline-specific ones."""
        errors = super(InlineModelAdminChecks, self).check(cls, model=cls.model, **kwargs)
        errors.extend(self._check_relation(cls, parent_model))
        errors.extend(self._check_exclude_of_parent_model(cls, parent_model))
        errors.extend(self._check_extra(cls))
        errors.extend(self._check_max_num(cls))
        errors.extend(self._check_min_num(cls))
        errors.extend(self._check_formset(cls))
        return errors
    def _check_exclude_of_parent_model(self, cls, parent_model):
        """ The foreign key back to the parent model must not be excluded. """
        # Do not perform more specific checks if the base checks result in an
        # error.
        errors = super(InlineModelAdminChecks, self)._check_exclude(cls, parent_model)
        if errors:
            return []
        # Skip if `fk_name` is invalid.
        if self._check_relation(cls, parent_model):
            return []
        if cls.exclude is None:
            return []
        fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name)
        if fk.name in cls.exclude:
            return [
                checks.Error(
                    "Cannot exclude the field '%s', because it is the foreign key "
                    "to the parent model '%s.%s'." % (
                        fk.name, parent_model._meta.app_label, parent_model._meta.object_name
                    ),
                    hint=None,
                    obj=cls,
                    id='admin.E201',
                )
            ]
        else:
            return []
    def _check_relation(self, cls, parent_model):
        """ Check a valid foreign key exists from the inline's model to the
        parent model (reported as E202 otherwise). """
        try:
            _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name)
        except ValueError as e:
            return [checks.Error(e.args[0], hint=None, obj=cls, id='admin.E202')]
        else:
            return []
    def _check_extra(self, cls):
        """ Check that extra is an integer. """
        if not isinstance(cls.extra, int):
            return must_be('an integer', option='extra', obj=cls, id='admin.E203')
        else:
            return []
    def _check_max_num(self, cls):
        """ Check that max_num is an integer. """
        if cls.max_num is None:
            return []
        elif not isinstance(cls.max_num, int):
            return must_be('an integer', option='max_num', obj=cls, id='admin.E204')
        else:
            return []
    def _check_min_num(self, cls):
        """ Check that min_num is an integer. """
        if cls.min_num is None:
            return []
        elif not isinstance(cls.min_num, int):
            return must_be('an integer', option='min_num', obj=cls, id='admin.E205')
        else:
            return []
    def _check_formset(self, cls):
        """ Check formset is a subclass of BaseModelFormSet. """
        if not issubclass(cls.formset, BaseModelFormSet):
            return must_inherit_from(parent='BaseModelFormSet', option='formset',
                                     obj=cls, id='admin.E206')
        else:
            return []
def must_be(type, option, obj, id):
    """Return a one-element error list saying `option` must be `type`.

    Note: the parameters `type` and `id` deliberately mirror the check
    framework's keyword names and shadow the builtins of the same name.
    """
    return [
        checks.Error(
            "The value of '%s' must be %s." % (option, type),
            hint=None,
            obj=obj,
            id=id,
        ),
    ]
def must_inherit_from(parent, option, obj, id):
    """Return a one-element error list saying `option` must subclass `parent`."""
    return [
        checks.Error(
            "The value of '%s' must inherit from '%s'." % (option, parent),
            hint=None,
            obj=obj,
            id=id,
        ),
    ]
def refer_to_missing_field(field, option, model, obj, id):
    """Return a one-element error list for an option naming a nonexistent
    field on `model` (`id` shadows the builtin on purpose, matching the
    check framework's keyword name)."""
    return [
        checks.Error(
            "The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
                option, field, model._meta.app_label, model._meta.object_name
            ),
            hint=None,
            obj=obj,
            id=id,
        ),
    ]
|
bsd-3-clause
|
edmorley/django
|
django/db/migrations/serializer.py
|
24
|
13457
|
import builtins
import collections
import collections.abc
import datetime
import decimal
import enum
import functools
import math
import re
import types
import uuid

from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject
from django.utils import datetime_safe
from django.utils.functional import LazyObject, Promise
from django.utils.timezone import utc
from django.utils.version import get_docs_version
class BaseSerializer:
    """Base class: wraps a value and serializes it to migration source."""
    def __init__(self, value):
        self.value = value
    def serialize(self):
        """Return a (source_string, imports_set) pair; subclass hook."""
        raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.')
class BaseSequenceSerializer(BaseSerializer):
    """Shared logic for container types: serialize each item, then splice
    the comma-joined result into the subclass's _format() template."""
    def _format(self):
        raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.')
    def serialize(self):
        imports = set()
        item_strings = []
        for element in self.value:
            element_string, element_imports = serializer_factory(element).serialize()
            imports.update(element_imports)
            item_strings.append(element_string)
        return self._format() % (", ".join(item_strings)), imports
class BaseSimpleSerializer(BaseSerializer):
    """Serialize values whose repr() is a valid literal (bool, int, None)."""
    def serialize(self):
        return repr(self.value), set()
class ByteTypeSerializer(BaseSerializer):
    """Serialize bytes via repr(), which yields a b'...' literal."""
    def serialize(self):
        return repr(self.value), set()
class DatetimeSerializer(BaseSerializer):
    """Serialize datetime.datetime values, normalizing aware values to UTC."""
    def serialize(self):
        tzinfo = self.value.tzinfo
        if tzinfo is not None and tzinfo != utc:
            # Aware datetimes are always written out in UTC.
            self.value = self.value.astimezone(utc)
        value_repr = repr(self.value).replace("<UTC>", "utc")
        if isinstance(self.value, datetime_safe.datetime):
            value_repr = "datetime.%s" % value_repr
        imports = {"import datetime"}
        if self.value.tzinfo is not None:
            imports.add("from django.utils.timezone import utc")
        return value_repr, imports
class DateSerializer(BaseSerializer):
    """Serialize datetime.date values; datetime_safe needs its module prefix."""
    def serialize(self):
        value_repr = repr(self.value)
        if isinstance(self.value, datetime_safe.date):
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
class DecimalSerializer(BaseSerializer):
    """Serialize decimal.Decimal via repr(), e.g. Decimal('1.5')."""
    def serialize(self):
        return repr(self.value), {"from decimal import Decimal"}
class DeconstructableSerializer(BaseSerializer):
    """Serialize any object exposing the deconstruct() contract."""
    @staticmethod
    def serialize_deconstructed(path, args, kwargs):
        """Render ``path(*args, **kwargs)`` as source, collecting imports."""
        name, imports = DeconstructableSerializer._serialize_path(path)
        arg_strings = []
        for arg in args:
            arg_string, arg_imports = serializer_factory(arg).serialize()
            arg_strings.append(arg_string)
            imports.update(arg_imports)
        # Keyword arguments are emitted in sorted order for determinism.
        for kw, arg in sorted(kwargs.items()):
            arg_string, arg_imports = serializer_factory(arg).serialize()
            imports.update(arg_imports)
            arg_strings.append("%s=%s" % (kw, arg_string))
        return "%s(%s)" % (name, ", ".join(arg_strings)), imports
    @staticmethod
    def _serialize_path(path):
        """Abbreviate django.db.models paths to 'models.X'; otherwise keep
        the full dotted path and import its module."""
        module, name = path.rsplit(".", 1)
        if module == "django.db.models":
            return "models.%s" % name, {"from django.db import models"}
        return path, {"import %s" % module}
    def serialize(self):
        return self.serialize_deconstructed(*self.value.deconstruct())
class DictionarySerializer(BaseSerializer):
    """Serialize dicts as a literal, with keys sorted for determinism."""
    def serialize(self):
        imports = set()
        entries = []
        for key, value in sorted(self.value.items()):
            key_string, key_imports = serializer_factory(key).serialize()
            value_string, value_imports = serializer_factory(value).serialize()
            imports.update(key_imports)
            imports.update(value_imports)
            entries.append("%s: %s" % (key_string, value_string))
        return "{%s}" % (", ".join(entries)), imports
class EnumSerializer(BaseSerializer):
    """Serialize enum members as module.EnumClass(value) calls."""
    def serialize(self):
        enum_class = type(self.value)
        module = enum_class.__module__
        # Serialize the member's underlying value, keeping its imports.
        value_string, value_imports = serializer_factory(self.value.value).serialize()
        imports = {"import %s" % module}
        imports.update(value_imports)
        return "%s.%s(%s)" % (module, enum_class.__name__, value_string), imports
class FloatSerializer(BaseSimpleSerializer):
    """Serialize floats; nan/inf have no literal form and are wrapped."""
    def serialize(self):
        if math.isfinite(self.value):
            return super().serialize()
        # repr() of nan/inf isn't a valid literal; round-trip via float().
        return 'float("{}")'.format(self.value), set()
class FrozensetSerializer(BaseSequenceSerializer):
    """Serialize frozensets as frozenset([...]) calls."""
    def _format(self):
        return "frozenset([%s])"
class FunctionTypeSerializer(BaseSerializer):
    """Serialize plain functions and class-bound methods by dotted path.
    Lambdas and nested (<locals>) functions cannot be serialized."""
    def serialize(self):
        func = self.value
        bound_to = getattr(func, "__self__", None)
        if bound_to and isinstance(bound_to, type):
            # A method bound to a class (e.g. a classmethod).
            module = bound_to.__module__
            return "%s.%s.%s" % (module, bound_to.__name__, func.__name__), {"import %s" % module}
        # Further error checking
        if func.__name__ == '<lambda>':
            raise ValueError("Cannot serialize function: lambda")
        if func.__module__ is None:
            raise ValueError("Cannot serialize function %r: No module" % func)
        module_name = func.__module__
        if '<' not in func.__qualname__:  # Qualname can include <locals>
            return '%s.%s' % (module_name, func.__qualname__), {'import %s' % func.__module__}
        raise ValueError(
            'Could not find function %s in %s.\n' % (func.__name__, module_name)
        )
class FunctoolsPartialSerializer(BaseSerializer):
    """Serialize functools.partial objects by re-serializing func/args/keywords."""
    def serialize(self):
        # Serialize functools.partial() arguments
        func_string, func_imports = serializer_factory(self.value.func).serialize()
        args_string, args_imports = serializer_factory(self.value.args).serialize()
        keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize()
        # Collect the imports needed by every component.
        imports = {'import functools'}
        imports.update(func_imports)
        imports.update(args_imports)
        imports.update(keywords_imports)
        return (
            "functools.partial(%s, *%s, **%s)" % (
                func_string, args_string, keywords_string,
            ),
            imports,
        )
class IterableSerializer(BaseSerializer):
    """Serialize arbitrary iterables as a tuple literal."""
    def serialize(self):
        imports = set()
        item_strings = []
        for item in self.value:
            item_string, item_imports = serializer_factory(item).serialize()
            imports.update(item_imports)
            item_strings.append(item_string)
        # When len(strings)==0, the empty iterable should be serialized as
        # "()", not "(,)" because (,) is invalid Python syntax.
        template = "(%s,)" if len(item_strings) == 1 else "(%s)"
        return template % (", ".join(item_strings)), imports
class ModelFieldSerializer(DeconstructableSerializer):
    """Serialize model fields; the deconstructed attribute name is dropped."""
    def serialize(self):
        attr_name, path, args, kwargs = self.value.deconstruct()
        return self.serialize_deconstructed(path, args, kwargs)
class ModelManagerSerializer(DeconstructableSerializer):
    """Serialize model managers, including QuerySet.as_manager() managers."""
    def serialize(self):
        as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct()
        if not as_manager:
            return self.serialize_deconstructed(manager_path, args, kwargs)
        # Managers built from a QuerySet serialize as <QuerySet>.as_manager().
        name, imports = self._serialize_path(qs_path)
        return "%s.as_manager()" % name, imports
class OperationSerializer(BaseSerializer):
    """Serialize migration Operation instances via OperationWriter."""
    def serialize(self):
        from django.db.migrations.writer import OperationWriter
        string, imports = OperationWriter(self.value, indentation=0).serialize()
        # Nested operation, trailing comma is handled in upper OperationWriter._write()
        return string.rstrip(','), imports
class RegexSerializer(BaseSerializer):
    """Serialize compiled regex patterns as re.compile(...) calls."""
    def serialize(self):
        regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize()
        # Turn off default implicit flags (e.g. re.U) because regexes with the
        # same implicit and explicit flags aren't equal.
        flags = self.value.flags ^ re.compile('').flags
        regex_flags, flag_imports = serializer_factory(flags).serialize()
        imports = {"import re"}
        imports.update(pattern_imports)
        imports.update(flag_imports)
        call_args = [regex_pattern]
        if flags:
            call_args.append(regex_flags)
        return "re.compile(%s)" % ', '.join(call_args), imports
class SequenceSerializer(BaseSequenceSerializer):
    """Serialize lists as [...] literals."""
    def _format(self):
        return "[%s]"
class SetSerializer(BaseSequenceSerializer):
    """Serialize sets as {...} literals (set() when empty)."""
    def _format(self):
        # Serialize as a set literal except when value is empty because {}
        # is an empty dict.
        return '{%s}' if self.value else 'set(%s)'
class SettingsReferenceSerializer(BaseSerializer):
    """Serialize SettingsReference values as settings.<NAME> lookups."""
    def serialize(self):
        return "settings.%s" % self.value.setting_name, {"from django.conf import settings"}
class TextTypeSerializer(BaseSerializer):
    """Serialize str values via repr()."""
    def serialize(self):
        return repr(self.value), set()
class TimedeltaSerializer(BaseSerializer):
    """Serialize datetime.timedelta via repr(), which is a valid call."""
    def serialize(self):
        return repr(self.value), {"import datetime"}
class TimeSerializer(BaseSerializer):
    """Serialize datetime.time values; datetime_safe needs its module prefix."""
    def serialize(self):
        value_repr = repr(self.value)
        if isinstance(self.value, datetime_safe.time):
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
class TupleSerializer(BaseSequenceSerializer):
    """Serialize tuples, with the single-element trailing-comma special case."""
    def _format(self):
        # When len(value)==0, the empty tuple should be serialized as "()",
        # not "(,)" because (,) is invalid Python syntax.
        return "(%s)" if len(self.value) != 1 else "(%s,)"
class TypeSerializer(BaseSerializer):
    """Serialize classes by dotted path, special-casing well-known types.
    NOTE(review): a type without __module__ falls through and returns
    None implicitly — preserved as-is; confirm whether that is reachable."""
    def serialize(self):
        special_cases = [
            (models.Model, "models.Model", []),
        ]
        for case, string, imports in special_cases:
            if case is self.value:
                return string, set(imports)
        if hasattr(self.value, "__module__"):
            module = self.value.__module__
            if module == builtins.__name__:
                # Builtins need no import.
                return self.value.__name__, set()
            return "%s.%s" % (module, self.value.__name__), {"import %s" % module}
class UUIDSerializer(BaseSerializer):
    """Serialize uuid.UUID values as uuid.UUID('...') calls."""
    def serialize(self):
        return "uuid.%s" % repr(self.value), {"import uuid"}
def serializer_factory(value):
    """Return a serializer instance able to write `value` into a migration.

    Dispatch is deliberately order-dependent: deconstructable objects and
    concrete container types must be matched before the generic fallbacks
    further down. Raises ValueError for unserializable values.
    """
    from django.db.migrations.writer import SettingsReference
    if isinstance(value, Promise):
        # Lazy translation objects resolve to plain strings.
        value = str(value)
    elif isinstance(value, LazyObject):
        # The unwrapped value is returned as the first item of the arguments
        # tuple.
        value = value.__reduce__()[1][0]
    if isinstance(value, models.Field):
        return ModelFieldSerializer(value)
    if isinstance(value, models.manager.BaseManager):
        return ModelManagerSerializer(value)
    if isinstance(value, Operation):
        return OperationSerializer(value)
    if isinstance(value, type):
        return TypeSerializer(value)
    # Anything that knows how to deconstruct itself.
    if hasattr(value, 'deconstruct'):
        return DeconstructableSerializer(value)
    # Unfortunately some of these are order-dependent.
    if isinstance(value, frozenset):
        return FrozensetSerializer(value)
    if isinstance(value, list):
        return SequenceSerializer(value)
    if isinstance(value, set):
        return SetSerializer(value)
    if isinstance(value, tuple):
        return TupleSerializer(value)
    if isinstance(value, dict):
        return DictionarySerializer(value)
    if isinstance(value, enum.Enum):
        return EnumSerializer(value)
    if isinstance(value, datetime.datetime):
        return DatetimeSerializer(value)
    if isinstance(value, datetime.date):
        return DateSerializer(value)
    if isinstance(value, datetime.time):
        return TimeSerializer(value)
    if isinstance(value, datetime.timedelta):
        return TimedeltaSerializer(value)
    if isinstance(value, SettingsReference):
        return SettingsReferenceSerializer(value)
    if isinstance(value, float):
        return FloatSerializer(value)
    if isinstance(value, (bool, int, type(None))):
        return BaseSimpleSerializer(value)
    if isinstance(value, bytes):
        return ByteTypeSerializer(value)
    if isinstance(value, str):
        return TextTypeSerializer(value)
    if isinstance(value, decimal.Decimal):
        return DecimalSerializer(value)
    if isinstance(value, functools.partial):
        return FunctoolsPartialSerializer(value)
    if isinstance(value, (types.FunctionType, types.BuiltinFunctionType, types.MethodType)):
        return FunctionTypeSerializer(value)
    # Use collections.abc.Iterable: the bare collections.Iterable alias was
    # deprecated since Python 3.3 and removed in Python 3.10.
    if isinstance(value, collections.abc.Iterable):
        return IterableSerializer(value)
    if isinstance(value, (COMPILED_REGEX_TYPE, RegexObject)):
        return RegexSerializer(value)
    if isinstance(value, uuid.UUID):
        return UUIDSerializer(value)
    raise ValueError(
        "Cannot serialize: %r\nThere are some values Django cannot serialize into "
        "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
        "topics/migrations/#migration-serializing" % (value, get_docs_version())
    )
|
bsd-3-clause
|
Dexhub/MTX
|
src/mem/slicc/ast/TypeFieldMethodAST.py
|
18
|
2396
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.TypeFieldAST import TypeFieldAST
class TypeFieldMethodAST(TypeFieldAST):
    """AST node for a method declared inside a SLICC type definition."""
    def __init__(self, slicc, return_type_ast, ident, type_asts, pairs,
                 statements = None):
        super(TypeFieldMethodAST, self).__init__(slicc, pairs)
        self.return_type_ast = return_type_ast
        self.ident = ident
        self.type_asts = type_asts
        self.statements = statements
    def __repr__(self):
        return ""
    def generate(self, type):
        """Register this method on `type`, reporting duplicate declarations."""
        # Resolve the declared return type.
        return_type = self.return_type_ast.type
        # Resolve the declared type of each parameter.
        param_types = [param.type for param in self.type_asts]
        if not type.addMethod(self.ident, return_type, param_types):
            self.error("Duplicate method: %s:%s()" % (type, self.ident))
|
bsd-3-clause
|
yl565/statsmodels
|
statsmodels/tools/tests/test_grouputils.py
|
31
|
11494
|
import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
    def test_dummies_groups(self):
        # Smoke test: calls dummy_sparse under the hood and only checks
        # that neither the default call nor the level=1 variant (for
        # multi-level groupings) raises.
        self.grouping.dummies_groups()
        if len(self.grouping.group_names) > 1:
            self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
    """Grouping built from a two-level (firm, year) MultiIndex."""

    @classmethod
    def setupClass(cls):
        # Grunfeld panel keyed by (firm, year): 11 firms, 20 years each.
        panel = grunfeld.load_pandas().data.set_index(['firm', 'year'])
        cls.data = panel
        cls.grouping = Grouping(panel.index)
        cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
    """Grouping built from a single-level (firm) index."""

    @classmethod
    def setupClass(cls):
        # Same Grunfeld data, grouped by firm only.
        frame = grunfeld.load_pandas().data.set_index(['firm'])
        cls.data = frame
        cls.grouping = Grouping(frame.index)
        cls.expected_counts = [20] * 11
def test_init_api():
    """Exercise the Grouping constructor with every supported input
    form: pandas MultiIndex, plain Index, lists of tuples, flat lists,
    explicit names, and generated default names."""
    # make a multi-index panel
    grun_data = grunfeld.load_pandas().data
    multi_index_panel = grun_data.set_index(['firm', 'year']).index
    grouping = Grouping(multi_index_panel)
    # check group_names
    np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
    # check shape
    np.testing.assert_array_equal(grouping.index_shape, (11, 20))
    # check index_int
    # (integer codes per level; firms are label-encoded, years 0..19)
    np.testing.assert_array_equal(grouping.labels,
            [[ 5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
            5,  5,  5,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,  8,
            8,  8,  8,  8,  8,  8,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,  4,
            4,  4,  4,  4,  4,  4,  4,  4,  4,  2,  2,  2,  2,  2,  2,  2,  2,
            2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  2,  1,  1,  1,  1,  1,
            1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  7,  7,
            7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,  7,
            7,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,  9,
            9,  9,  9,  9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
            10, 10, 10, 10, 10, 10, 10,  6,  6,  6,  6,  6,  6,  6,  6,  6,  6,
            6,  6,  6,  6,  6,  6,  6,  6,  6,  6,  3,  3,  3,  3,  3,  3,  3,
            3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  0,  0,  0,  0,
            0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0],
            [ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
            17, 18, 19,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,
            14, 15, 16, 17, 18, 19,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,
            11, 12, 13, 14, 15, 16, 17, 18, 19,  0,  1,  2,  3,  4,  5,  6,  7,
            8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,  0,  1,  2,  3,  4,
            5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,  0,  1,
            2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
            19,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
            16, 17, 18, 19,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
            13, 14, 15, 16, 17, 18, 19,  0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,  0,  1,  2,  3,  4,  5,  6,
            7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,  0,  1,  2,  3,
            4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
    grouping = Grouping(multi_index_panel, names=['firms', 'year'])
    np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
    # make a multi-index grouping
    anes_data = anes96.load_pandas().data
    multi_index_groups = anes_data.set_index(['educ', 'income',
                                              'TVnews']).index
    grouping = Grouping(multi_index_groups)
    np.testing.assert_array_equal(grouping.group_names,
                                  ['educ', 'income', 'TVnews'])
    np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
    # make a list multi-index panel
    list_panel = multi_index_panel.tolist()
    grouping = Grouping(list_panel, names=['firms', 'year'])
    np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
    np.testing.assert_array_equal(grouping.index_shape, (11, 20))
    # make a list multi-index grouping
    list_groups = multi_index_groups.tolist()
    grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
    np.testing.assert_array_equal(grouping.group_names,
                                  ['educ', 'income', 'TVnews'])
    np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
    # single-variable index grouping
    index_group = multi_index_panel.get_level_values(0)
    grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed inplace above
    # (to 'firms'), which is why that name shows up here
    np.testing.assert_array_equal(grouping.group_names, ['firms'])
    np.testing.assert_array_equal(grouping.index_shape, (220,))
    # single variable list grouping
    list_group = multi_index_panel.get_level_values(0).tolist()
    grouping = Grouping(list_group)
    np.testing.assert_array_equal(grouping.group_names, ["group0"])
    np.testing.assert_array_equal(grouping.index_shape, 11*20)
    # test generic group names
    grouping = Grouping(list_groups)
    np.testing.assert_array_equal(grouping.group_names,
                                  ['group0', 'group1', 'group2'])
|
bsd-3-clause
|
waidyanatha/sambro-eden
|
tests/climate/__init__.py
|
56
|
3002
|
ClimateDataPortal = local_import('ClimateDataPortal')
def clear_tables():
    # Wipe every climate table (places plus both sample tables) and
    # commit so the deletes are visible to other connections.
    ClimateDataPortal.place.truncate()
    ClimateDataPortal.rainfall_mm.truncate()
    ClimateDataPortal.temperature_celsius.truncate()
    db.commit()
#clear_tables()
def frange(start, end, inc=1.0):
    """Yield floats from start up to (but excluding) end, in steps of inc.

    Each value is computed as start + i * inc rather than by repeated
    addition, so floating-point error does not accumulate.
    """
    i = 0
    while True:
        value = start + (i * inc)
        if value >= end:
            # PEP 479: 'raise StopIteration' inside a generator becomes a
            # RuntimeError on Python 3.7+; a plain return ends the
            # generator correctly on all versions.
            return
        yield value
        i += 1
def populate_test_climate_data():
    """Insert a grid of places covering Nepal plus ~50 years of random
    monthly rainfall/temperature samples at 100 randomly chosen places.

    Relies on the web2py globals `session`, `db` and the imported
    `ClimateDataPortal` module being in scope.
    """
    assert session.s3.debug == True
    db.commit()
    # create a grid of places
    place_id_grid = []
    # BBox coords for Nepal in degrees
    for latitude in frange(26.4, 30.5, 0.3):
        row = []
        for longitude in frange(80.0, 88.3, 0.3):
            place_id = ClimateDataPortal.place.insert(
                longitude = longitude,
                latitude = latitude
            )
            row.append(place_id)
        place_id_grid.append(row)
    # create a sequence of time periods.
    def to_date(month_number):
        # (currently unused inside this function)
        # NOTE(review): month_number / 12 relies on Python 2 integer
        # division; under true division this would pass a float year.
        return datetime.date(
            month_number / 12,
            (month_number % 12) + 1,
            1
        )
    # month numbers from Jan 1960 up to (excluding) Jan 2011
    time_period_ids = list(range(
        ClimateDataPortal.date_to_month_number(datetime.date(1960, 1, 1)),
        ClimateDataPortal.date_to_month_number(datetime.date(2011, 1, 1)),
    ))
    # observed samples
    # pick 100 random points from the place_id grid
    from random import randint
    observation_place_ids = set()
    for i in range(100):
        random_row = place_id_grid[
            randint(0,len(place_id_grid)-1)
        ]
        random_place = random_row[
            randint(0,len(random_row)-1)
        ]
        observation_place_ids.add(random_place)
    # generate samples for observed data
    for observation_place_id in observation_place_ids:
        for time_period_id in time_period_ids:
            ClimateDataPortal.rainfall_mm.insert(
                place_id = observation_place_id,
                time_period = time_period_id,
                sample_type = ClimateDataPortal.Observed,
                value = randint(50,150)
            )
            ClimateDataPortal.temperature_celsius.insert(
                place_id = observation_place_id,
                time_period = time_period_id,
                sample_type = ClimateDataPortal.Observed,
                value = randint(-30, 30)
            )
    db.commit()
#populate_test_climate_data()
map_plugin = ClimateDataPortal.MapPlugin(
data_type_option_names = ['Observed','Gridded','Projected', 'RC Model', 'GC Model', 'Scenario'],
parameter_type_names = ['Rainfall', 'Temperature', ],
year_max = datetime.date.today().year,
year_min = 1960,
)
"""
map_plugin.get_image_overlay(
env = Storage(globals()),
"Observed",
parameter = "Rainfall",
projected_option_type = '',
from_date = datetime.date(2005, 1, 1),
to_date = datetime.date(2010, 1, 1),
statistic = 'average'
)
"""
|
mit
|
pwoodworth/intellij-community
|
python/lib/Lib/tarfile.py
|
80
|
74279
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
# Module metadata; the $...$ strings are CVS/SVN keywords expanded by
# the version control system.
__version__ = "$Revision: 60730 $"
# $Source$
version     = "0.8.0"
__author__  = "Lars Gustäbel (lars@gustaebel.de)"
__date__    = "$Date: 2008-02-11 10:36:07 -0800 (Mon, 11 Feb 2008) $"
__cvsid__   = "$Id: tarfile.py 60730 2008-02-11 18:36:07Z lars.gustaebel $"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
# MacOS 9 rooted:path vs :relative:path semantics are not handled;
# refuse to import there rather than silently misbehave.
if sys.platform == 'mac':
    # This module needs work for MacOS9, especially in the area of pathname
    # handling. In many places it is assumed a simple substitution of / by the
    # local os.path.sep is good enough to convert pathnames, but this does not
    # work with the mac rooted:path:name versus :nonrooted:path:name syntax
    raise ImportError, "tarfile does not work for platform==mac"
# grp/pwd (user/group name lookup) are POSIX-only; fall back to None so
# callers can degrade to numeric ids on other platforms.
try:
    import grp, pwd
except ImportError:
    grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]

#---------------------------------------------------------
# tar constants
# (field sizes and magic values from the POSIX ustar format
# plus the GNU tar extensions)
#---------------------------------------------------------
NUL        = "\0"               # the null character
BLOCKSIZE  = 512                # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20     # length of records
MAGIC      = "ustar"            # magic tar string
VERSION    = "00"               # version number

LENGTH_NAME    = 100            # maximum length of a filename
LENGTH_LINK    = 100            # maximum length of a linkname
LENGTH_PREFIX  = 155            # maximum length of the prefix field
MAXSIZE_MEMBER = 077777777777L  # maximum size of a file (11 octal digits)

REGTYPE  = "0"                  # regular file
AREGTYPE = "\0"                 # regular file
LNKTYPE  = "1"                  # link (inside tarfile)
SYMTYPE  = "2"                  # symbolic link
CHRTYPE  = "3"                  # character special device
BLKTYPE  = "4"                  # block special device
DIRTYPE  = "5"                  # directory
FIFOTYPE = "6"                  # fifo special device
CONTTYPE = "7"                  # contiguous file

GNUTYPE_LONGNAME = "L"          # GNU tar extension for longnames
GNUTYPE_LONGLINK = "K"          # GNU tar extension for longlink
GNUTYPE_SPARSE   = "S"          # GNU tar extension for sparse file

#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,  # file types that tarfile
                   SYMTYPE, DIRTYPE, FIFOTYPE,  # can cope with.
                   CONTTYPE, CHRTYPE, BLKTYPE,
                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
                   GNUTYPE_SPARSE)

REGULAR_TYPES = (REGTYPE, AREGTYPE,             # file types that somehow
                 CONTTYPE, GNUTYPE_SPARSE)      # represent regular files

#---------------------------------------------------------
# Bits used in the mode field, values in octal.
# (mirrors the stat module's S_* / permission constants)
#---------------------------------------------------------
S_IFLNK = 0120000        # symbolic link
S_IFREG = 0100000        # regular file
S_IFBLK = 0060000        # block device
S_IFDIR = 0040000        # directory
S_IFCHR = 0020000        # character device
S_IFIFO = 0010000        # fifo

TSUID   = 04000          # set UID on execution
TSGID   = 02000          # set GID on execution
TSVTX   = 01000          # reserved

TUREAD  = 0400           # read by owner
TUWRITE = 0200           # write by owner
TUEXEC  = 0100           # execute/search by owner
TGREAD  = 0040           # read by group
TGWRITE = 0020           # write by group
TGEXEC  = 0010           # execute/search by group
TOREAD  = 0004           # read by other
TOWRITE = 0002           # write by other
TOEXEC  = 0001           # execute/search by other
def stn(s, length):
    """Convert a python string to a null-terminated string buffer.

    The result is always exactly `length` characters: long strings are
    truncated, short ones padded with NULs.
    """
    return s[:length].ljust(length, NUL)
def nts(s):
    """Convert a null-terminated string field to a python string.

    Everything from the first NUL onwards is discarded; a field with
    no NUL is returned unchanged.
    """
    return s.split("\0", 1)[0]
def nti(s):
    """Convert a number field to a python number.

    POSIX fields are octal ASCII; fields whose first byte is 0200 use
    the GNU base-256 big-endian extension (see itn() below).
    """
    # There are two possible encodings for a number field, see
    # itn() below.
    if s[0] != chr(0200):
        # Empty/NUL-only fields count as zero.
        n = int(nts(s) or "0", 8)
    else:
        # Base-256: remaining bytes are a big-endian unsigned integer.
        n = 0L
        for i in xrange(len(s) - 1):
            n <<= 8
            n += ord(s[i + 1])
    return n
def itn(n, digits=8, posix=False):
    """Convert a python number to a number field.

    Inverse of nti(): emits either a NUL-terminated octal string or,
    for values that do not fit (and posix=False), the GNU base-256
    form marked by a leading 0200 byte.
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0200 byte indicates this particular
    # encoding, the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
    if 0 <= n < 8 ** (digits - 1):
        s = "%0*o" % (digits - 1, n) + NUL
    else:
        if posix:
            raise ValueError("overflow in number field")

        if n < 0:
            # XXX We mimic GNU tar's behaviour with negative numbers,
            # this could raise OverflowError.
            n = struct.unpack("L", struct.pack("l", n))[0]

        s = ""
        for i in xrange(digits - 1):
            s = chr(n & 0377) + s
            n >>= 8
        s = chr(0200) + s
    return s
def calc_chksums(buf):
    """Calculate the checksum for a member's header by summing up all
    characters except for the chksum field which is treated as if
    it was filled with spaces. According to the GNU tar sources,
    some tars (Sun and NeXT) calculate chksum with signed char,
    which will be different if there are chars in the buffer with
    the high bit set. So we calculate two checksums, unsigned and
    signed.
    """
    # The chksum field occupies bytes 148-155; eight spaces sum to 256.
    before, after = buf[:148], buf[156:512]
    unsigned_chksum = 256 + sum(struct.unpack("148B", before)
                                + struct.unpack("356B", after))
    signed_chksum = 256 + sum(struct.unpack("148b", before)
                              + struct.unpack("356b", after))
    return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
    If length is None, copy the entire content.

    Raises IOError if src runs out of data before `length` bytes
    have been copied.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: delegate to shutil.
        shutil.copyfileobj(src, dst)
        return

    BUFSIZE = 16 * 1024
    blocks, remainder = divmod(length, BUFSIZE)
    while blocks > 0:
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)
        blocks -= 1
    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
# Lookup table mapping mode bits to their ls(1)-style display chars.
# Each inner tuple lists (bit pattern, char) alternatives for one of the
# ten output positions; the first fully-matching pattern wins, else "-".
filemode_table = (
    ((S_IFLNK,      "l"),
     (S_IFREG,      "-"),
     (S_IFBLK,      "b"),
     (S_IFDIR,      "d"),
     (S_IFCHR,      "c"),
     (S_IFIFO,      "p")),

    ((TUREAD,       "r"),),
    ((TUWRITE,      "w"),),
    ((TUEXEC|TSUID, "s"),
     (TSUID,        "S"),
     (TUEXEC,       "x")),

    ((TGREAD,       "r"),),
    ((TGWRITE,      "w"),),
    ((TGEXEC|TSGID, "s"),
     (TSGID,        "S"),
     (TGEXEC,       "x")),

    ((TOREAD,       "r"),),
    ((TOWRITE,      "w"),),
    ((TOEXEC|TSVTX, "t"),
     (TSVTX,        "T"),
     (TOEXEC,       "x"))
)
def filemode(mode):
    """Convert a file's mode to a string of the form
       -rwxrwxrwx.
       Used by TarFile.list()
    """
    def pick(alternatives):
        # First (bits, char) pair whose bits are all set wins; "-" if none.
        for bit, char in alternatives:
            if mode & bit == bit:
                return char
        return "-"
    return "".join(pick(table) for table in filemode_table)
# Archive member names always use forward slashes; on platforms whose
# native separator differs (e.g. Windows) normalize and convert.
if os.sep != "/":
    normpath = lambda path: os.path.normpath(path).replace(os.sep, "/")
else:
    normpath = os.path.normpath
class TarError(Exception):
    """Base exception for all errors raised by this module."""
class ExtractError(TarError):
    """General exception raised while extracting archive members."""
class ReadError(TarError):
    """Raised when a tar archive cannot be read."""
class CompressionError(TarError):
    """Raised when a compression method is unavailable."""
class StreamError(TarError):
    """Raised for operations unsupported on stream-like TarFiles."""
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
    """Low-level file object built on os.open/os.read/os.write.

    Used instead of a regular file object for streaming access.
    Supports mode "r" (read-only) and "w" (create/truncate for write);
    any other mode raises KeyError.
    """

    def __init__(self, name, mode):
        flags = {
            "r": os.O_RDONLY,
            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
        }[mode]
        # Avoid newline translation on platforms that distinguish
        # binary mode (e.g. Windows).
        flags |= getattr(os, "O_BINARY", 0)
        self.fd = os.open(name, flags)

    def close(self):
        os.close(self.fd)

    def read(self, size):
        return os.read(self.fd, size)

    def write(self, s):
        os.write(self.fd, s)
class _Stream:
    """Class that serves as an adapter between TarFile and
    a stream-like object. The stream-like object only
    needs to have a read() or write() method and is accessed
    blockwise. Use of gzip or bzip2 compression is possible.
    A stream-like object could be for example: sys.stdin,
    sys.stdout, a socket, a tape device etc.
    _Stream is intended to be used only internally.
    """
    def __init__(self, name, mode, comptype, fileobj, bufsize):
        """Construct a _Stream object.

        mode is "r" or "w"; comptype is "tar", "gz", "bz2" or "*"
        (transparent detection); fileobj may be None, in which case
        name is opened via _LowLevelFile.
        """
        self._extfileobj = True
        if fileobj is None:
            fileobj = _LowLevelFile(name, mode)
            self._extfileobj = False
        if comptype == '*':
            # Enable transparent compression detection for the
            # stream interface
            fileobj = _StreamProxy(fileobj)
            comptype = fileobj.getcomptype()
        self.name     = name or ""
        self.mode     = mode
        self.comptype = comptype
        self.fileobj  = fileobj
        self.bufsize  = bufsize
        self.buf      = ""        # raw (compressed) buffered data
        self.pos      = 0L        # logical (uncompressed) position
        self.closed   = False
        if comptype == "gz":
            try:
                import zlib
            except ImportError:
                raise CompressionError("zlib module is not available")
            self.zlib = zlib
            self.crc = zlib.crc32("")
            if mode == "r":
                self._init_read_gz()
            else:
                self._init_write_gz()
        if comptype == "bz2":
            try:
                import bz2
            except ImportError:
                raise CompressionError("bz2 module is not available")
            if mode == "r":
                self.dbuf = ""    # decompressed-but-unread data
                self.cmp = bz2.BZ2Decompressor()
            else:
                self.cmp = bz2.BZ2Compressor()
    def __del__(self):
        # Best-effort close on garbage collection; "closed" may be
        # missing if __init__ failed before setting it.
        if hasattr(self, "closed") and not self.closed:
            self.close()
    def _init_write_gz(self):
        """Initialize for writing with gzip compression.
        """
        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                            -self.zlib.MAX_WBITS,
                                            self.zlib.DEF_MEM_LEVEL,
                                            0)
        # gzip header: magic 1f 8b, CM=deflate, FLG=FNAME, mtime,
        # XFL=2, OS=0xff; followed by the NUL-terminated file name.
        timestamp = struct.pack("<L", long(time.time()))
        self.__write("\037\213\010\010%s\002\377" % timestamp)
        if self.name.endswith(".gz"):
            self.name = self.name[:-3]
        self.__write(self.name + NUL)
    def write(self, s):
        """Write string s to the stream.
        """
        if self.comptype == "gz":
            self.crc = self.zlib.crc32(s, self.crc)
        self.pos += len(s)
        if self.comptype != "tar":
            s = self.cmp.compress(s)
        self.__write(s)
    def __write(self, s):
        """Write string s to the stream if a whole new block
        is ready to be written.
        """
        # Only full bufsize chunks are flushed; the remainder stays
        # buffered until close().
        self.buf += s
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]
    def close(self):
        """Close the _Stream object. No operation should be
        done on it afterwards.
        """
        if self.closed:
            return

        if self.mode == "w" and self.comptype != "tar":
            self.buf += self.cmp.flush()

        if self.mode == "w" and self.buf:
            self.fileobj.write(self.buf)
            self.buf = ""
            if self.comptype == "gz":
                # The native zlib crc is an unsigned 32-bit integer, but
                # the Python wrapper implicitly casts that to a signed C
                # long.  So, on a 32-bit box self.crc may "look negative",
                # while the same crc on a 64-bit box may "look positive".
                # To avoid irksome warnings from the `struct` module, force
                # it to look positive on all boxes.
                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))

        if not self._extfileobj:
            self.fileobj.close()

        self.closed = True
    def _init_read_gz(self):
        """Initialize for reading a gzip compressed fileobj.
        """
        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
        self.dbuf = ""

        # taken from gzip.GzipFile with some alterations
        if self.__read(2) != "\037\213":
            raise ReadError("not a gzip file")
        if self.__read(1) != "\010":
            raise CompressionError("unsupported compression method")

        # Parse the flag byte and skip the optional header fields it
        # announces (FEXTRA, FNAME, FCOMMENT, FHCRC).
        flag = ord(self.__read(1))
        self.__read(6)

        if flag & 4:
            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            self.read(xlen)
        if flag & 8:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 16:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 2:
            self.__read(2)
    def tell(self):
        """Return the stream's file pointer position.
        """
        return self.pos
    def seek(self, pos=0):
        """Set the stream's file pointer to pos. Negative seeking
        is forbidden.
        """
        if pos - self.pos >= 0:
            # Forward seek = read and discard in bufsize chunks.
            blocks, remainder = divmod(pos - self.pos, self.bufsize)
            for i in xrange(blocks):
                self.read(self.bufsize)
            self.read(remainder)
        else:
            raise StreamError("seeking backwards is not allowed")
        return self.pos
    def read(self, size=None):
        """Return the next size number of bytes from the stream.
        If size is not defined, return all bytes of the stream
        up to EOF.
        """
        if size is None:
            t = []
            while True:
                buf = self._read(self.bufsize)
                if not buf:
                    break
                t.append(buf)
            buf = "".join(t)
        else:
            buf = self._read(size)
        self.pos += len(buf)
        return buf
    def _read(self, size):
        """Return size bytes from the stream.
        """
        if self.comptype == "tar":
            return self.__read(size)

        # Decompress raw chunks until at least `size` bytes are in dbuf.
        c = len(self.dbuf)
        t = [self.dbuf]
        while c < size:
            buf = self.__read(self.bufsize)
            if not buf:
                break
            buf = self.cmp.decompress(buf)
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.dbuf = t[size:]
        return t[:size]
    def __read(self, size):
        """Return size bytes from stream. If internal buffer is empty,
        read another block from the stream.
        """
        c = len(self.buf)
        t = [self.buf]
        while c < size:
            buf = self.fileobj.read(self.bufsize)
            if not buf:
                break
            t.append(buf)
            c += len(buf)
        t = "".join(t)
        self.buf = t[size:]
        return t[:size]
# class _Stream
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').

    One BLOCKSIZE block is read up-front to sniff the magic bytes;
    the first read() returns that block, after which reads are
    delegated straight to the wrapped file object.
    """

    def __init__(self, fileobj):
        self.fileobj = fileobj
        self.buf = self.fileobj.read(BLOCKSIZE)

    def read(self, size):
        # Hand back the sniffed block once, then bypass this proxy.
        self.read = self.fileobj.read
        return self.buf

    def getcomptype(self):
        # Identify the compression from well-known magic prefixes.
        for magic, comptype in (("\037\213\010", "gz"),
                                ("BZh91", "bz2")):
            if self.buf.startswith(magic):
                return comptype
        return "tar"

    def close(self):
        self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
    """Small proxy class that enables external file object
    support for "r:bz2" and "w:bz2" modes. This is actually
    a workaround for a limitation in bz2 module's BZ2File
    class which (unlike gzip.GzipFile) has no support for
    a file object argument.
    """
    # Raw bytes pulled from the underlying file per read() call.
    blocksize = 16 * 1024
    def __init__(self, fileobj, mode):
        self.fileobj = fileobj
        self.mode = mode
        self.init()
    def init(self):
        # (Re)create the (de)compressor; also reused by seek() to
        # rewind and restart decompression from the beginning.
        import bz2
        self.pos = 0
        if self.mode == "r":
            self.bz2obj = bz2.BZ2Decompressor()
            self.fileobj.seek(0)
            self.buf = ""
        else:
            self.bz2obj = bz2.BZ2Compressor()
    def read(self, size):
        # Accumulate decompressed data until at least `size` bytes are
        # buffered or the decompressor signals end of stream.
        # NOTE(review): if the underlying read() returned "" without an
        # EOFError this loop would not terminate -- confirm callers
        # always hand in a real file object.
        b = [self.buf]
        x = len(self.buf)
        while x < size:
            try:
                raw = self.fileobj.read(self.blocksize)
                data = self.bz2obj.decompress(raw)
                b.append(data)
            except EOFError:
                break
            x += len(data)
        self.buf = "".join(b)

        buf = self.buf[:size]
        self.buf = self.buf[size:]
        self.pos += len(buf)
        return buf
    def seek(self, pos):
        # Backward seeks restart decompression from scratch; forward
        # seeks just read and discard the intervening data.
        if pos < self.pos:
            self.init()
        self.read(pos - self.pos)
    def tell(self):
        return self.pos
    def write(self, data):
        self.pos += len(data)
        raw = self.bz2obj.compress(data)
        self.fileobj.write(raw)
    def close(self):
        # Flush any pending compressed data before closing.
        if self.mode == "w":
            raw = self.bz2obj.flush()
            self.fileobj.write(raw)
        self.fileobj.close()
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
    """A thin wrapper around an existing file object that
    provides a part of its data as an individual file
    object.

    The member's data lives at [offset, offset + size) in the
    underlying file; `sparse`, when given, is the sparse-section
    map of a GNU sparse member.
    """

    def __init__(self, fileobj, offset, size, sparse=None):
        self.fileobj = fileobj
        self.offset = offset
        self.size = size
        self.sparse = sparse
        self.position = 0

    def tell(self):
        """Return the current file position."""
        return self.position

    def seek(self, position):
        """Seek to a position in the file."""
        self.position = position

    def read(self, size=None):
        """Read data from the file."""
        remaining = self.size - self.position
        size = remaining if size is None else min(size, remaining)
        if self.sparse is None:
            return self.readnormal(size)
        return self.readsparse(size)

    def readnormal(self, size):
        """Read operation for regular files."""
        self.fileobj.seek(self.offset + self.position)
        self.position += size
        return self.fileobj.read(size)

    def readsparse(self, size):
        """Read operation for sparse files."""
        chunks = []
        while size > 0:
            chunk = self.readsparsesection(size)
            if not chunk:
                break
            size -= len(chunk)
            chunks.append(chunk)
        return "".join(chunks)

    def readsparsesection(self, size):
        """Read a single section of a sparse file."""
        section = self.sparse.find(self.position)
        if section is None:
            return ""
        # Never read past the end of the current section.
        size = min(size, section.offset + section.size - self.position)
        if isinstance(section, _data):
            # Real data stored in the archive.
            realpos = section.realpos + self.position - section.offset
            self.fileobj.seek(self.offset + realpos)
            self.position += size
            return self.fileobj.read(size)
        # A hole: synthesize NUL bytes.
        self.position += size
        return NUL * size
#class _FileInFile
class ExFileObject(object):
    """File-like object for reading an archive member.
       Is returned by TarFile.extractfile().

       Keeps a small read-ahead buffer (self.buffer) so that
       readline() can scan for newlines blockwise.
    """
    blocksize = 1024
    def __init__(self, tarfile, tarinfo):
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   getattr(tarinfo, "sparse", None))
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size
        self.position = 0
        self.buffer = ""
    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
           present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        buf = ""
        # Serve from the read-ahead buffer first, then the file.
        if self.buffer:
            if size is None:
                buf = self.buffer
                self.buffer = ""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]

        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))

        self.position += len(buf)
        return buf
    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
           and non-negative, return a string with at most that
           size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if "\n" in self.buffer:
            pos = self.buffer.find("\n") + 1
        else:
            # Pull blocks into the buffer until a newline (or EOF)
            # shows up.
            buffers = [self.buffer]
            while True:
                buf = self.fileobj.read(self.blocksize)
                buffers.append(buf)
                if not buf or "\n" in buf:
                    self.buffer = "".join(buffers)
                    pos = self.buffer.find("\n") + 1
                    if pos == 0:
                        # no newline found.
                        pos = len(self.buffer)
                    break

        if size != -1:
            pos = min(size, pos)

        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf
    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result
    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        return self.position
    def seek(self, pos, whence=os.SEEK_SET):
        """Seek to a position in the file.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")

        if whence == os.SEEK_SET:
            self.position = min(max(pos, 0), self.size)
        elif whence == os.SEEK_CUR:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == os.SEEK_END:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")

        # Any seek invalidates the read-ahead buffer.
        self.buffer = ""
        self.fileobj.seek(self.position)
    def close(self):
        """Close the file object.
        """
        self.closed = True
    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
       archive member given by a tar header block.
       TarInfo objects are returned by TarFile.getmember(),
       TarFile.getmembers() and TarFile.gettarinfo() and are
       usually created internally.
    """

    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
           of the member.
        """
        self.name = name        # member name (dirnames must end with '/')
        self.mode = 0666        # file permissions
        self.uid = 0            # user id
        self.gid = 0            # group id
        self.size = 0           # file size
        self.mtime = 0          # modification time
        self.chksum = 0         # header checksum
        self.type = REGTYPE     # member type
        self.linkname = ""      # link name
        self.uname = "user"     # user name
        self.gname = "group"    # group name
        self.devmajor = 0       # device major number
        self.devminor = 0       # device minor number

        self.offset = 0         # the tar header starts here
        self.offset_data = 0    # the file's data starts here

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))

    @classmethod
    def frombuf(cls, buf):
        """Construct a TarInfo object from a 512 byte string buffer.

        Raises ValueError for truncated, empty or checksum-mismatched
        headers. Field offsets follow the POSIX ustar layout.
        """
        if len(buf) != BLOCKSIZE:
            raise ValueError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise ValueError("empty header")

        tarinfo = cls()
        tarinfo.buf = buf
        tarinfo.name = nts(buf[0:100])
        tarinfo.mode = nti(buf[100:108])
        tarinfo.uid = nti(buf[108:116])
        tarinfo.gid = nti(buf[116:124])
        tarinfo.size = nti(buf[124:136])
        tarinfo.mtime = nti(buf[136:148])
        tarinfo.chksum = nti(buf[148:156])
        tarinfo.type = buf[156:157]
        tarinfo.linkname = nts(buf[157:257])
        tarinfo.uname = nts(buf[265:297])
        tarinfo.gname = nts(buf[297:329])
        tarinfo.devmajor = nti(buf[329:337])
        tarinfo.devminor = nti(buf[337:345])
        # ustar "prefix" field: rejoin long names split across
        # prefix/name (not used for GNU sparse members).
        prefix = nts(buf[345:500])

        if prefix and not tarinfo.issparse():
            tarinfo.name = prefix + "/" + tarinfo.name

        if tarinfo.chksum not in calc_chksums(buf):
            raise ValueError("invalid header")
        return tarinfo

    def tobuf(self, posix=False):
        """Return a tar header as a string of 512 byte blocks.

        With posix=True, oversized names/links/sizes raise ValueError;
        otherwise GNU longname/longlink extension headers are prepended.
        """
        buf = ""
        type = self.type
        prefix = ""

        if self.name.endswith("/"):
            type = DIRTYPE

        if type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
            # Prevent "././@LongLink" from being normalized.
            name = self.name
        else:
            name = normpath(self.name)

        if type == DIRTYPE:
            # directories should end with '/'
            name += "/"

        linkname = self.linkname
        if linkname:
            # if linkname is empty we end up with a '.'
            linkname = normpath(linkname)

        if posix:
            if self.size > MAXSIZE_MEMBER:
                raise ValueError("file is too large (>= 8 GB)")

            if len(self.linkname) > LENGTH_LINK:
                raise ValueError("linkname is too long (>%d)" % (LENGTH_LINK))

            if len(name) > LENGTH_NAME:
                # Split the overlong name at a "/" into prefix + name.
                prefix = name[:LENGTH_PREFIX + 1]
                while prefix and prefix[-1] != "/":
                    prefix = prefix[:-1]

                name = name[len(prefix):]
                prefix = prefix[:-1]

                if not prefix or len(name) > LENGTH_NAME:
                    raise ValueError("name is too long")

        else:
            if len(self.linkname) > LENGTH_LINK:
                buf += self._create_gnulong(self.linkname, GNUTYPE_LONGLINK)

            if len(name) > LENGTH_NAME:
                buf += self._create_gnulong(name, GNUTYPE_LONGNAME)

        parts = [
            stn(name, 100),
            itn(self.mode & 07777, 8, posix),
            itn(self.uid, 8, posix),
            itn(self.gid, 8, posix),
            itn(self.size, 12, posix),
            itn(self.mtime, 12, posix),
            "        ", # checksum field
            type,
            stn(self.linkname, 100),
            stn(MAGIC, 6),
            stn(VERSION, 2),
            stn(self.uname, 32),
            stn(self.gname, 32),
            itn(self.devmajor, 8, posix),
            itn(self.devminor, 8, posix),
            stn(prefix, 155)
        ]

        buf += "".join(parts).ljust(BLOCKSIZE, NUL)
        # Patch the real checksum back into bytes 148-154 of the last
        # 512-byte block (512-364 == 148, 512-357 == 155).
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
        self.buf = buf
        return buf

    def _create_gnulong(self, name, type):
        """Create a GNU longname/longlink header from name.
           It consists of an extended tar header, with the length
           of the longname as size, followed by data blocks,
           which contain the longname as a null terminated string.
        """
        name += NUL

        tarinfo = self.__class__()
        tarinfo.name = "././@LongLink"
        tarinfo.type = type
        tarinfo.mode = 0
        tarinfo.size = len(name)

        # create extended header
        buf = tarinfo.tobuf()
        # create name blocks
        buf += name
        blocks, remainder = divmod(len(name), BLOCKSIZE)
        if remainder > 0:
            buf += (BLOCKSIZE - remainder) * NUL
        return buf

    # Member-type predicates, keyed off the typeflag byte.
    def isreg(self):
        return self.type in REGULAR_TYPES
    def isfile(self):
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        return self.type == SYMTYPE
    def islnk(self):
        return self.type == LNKTYPE
    def ischr(self):
        return self.type == CHRTYPE
    def isblk(self):
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        return self.type == GNUTYPE_SPARSE
    def isdev(self):
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
    """The TarFile Class provides an interface to tar archives.
    """

    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)

    dereference = False         # If true, add content of linked file to the
                                # tar file, else the link.

    ignore_zeros = False        # If true, skips empty or invalid blocks and
                                # continues processing.

    errorlevel = 0              # If 0, fatal errors only appear in debug
                                # messages (if debug >= 0). If > 0, errors
                                # are passed to the caller as exceptions.

    posix = False               # If True, generates POSIX.1-1990-compliant
                                # archives (no GNU extensions!)

    # Factory used by extractfile() to build the returned file-like object.
    fileobject = ExFileObject

    def __init__(self, name=None, mode="r", fileobj=None):
        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
        read from an existing archive, 'a' to append data to an existing
        file or 'w' to create a new file overwriting an existing one. `mode'
        defaults to 'r'.
        If `fileobj' is given, it is used for reading or writing data. If it
        can be determined, `mode' is overridden by `fileobj's mode.
        `fileobj' is not closed, when TarFile is closed.
        """
        # Only the plain single-character modes are accepted here; the
        # compressed/streaming variants must go through TarFile.open().
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        self._mode = mode
        self.mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
        if not fileobj:
            fileobj = file(name, self.mode)
            self._extfileobj = False
        else:
            if name is None and hasattr(fileobj, "name"):
                name = fileobj.name
            if hasattr(fileobj, "mode"):
                self.mode = fileobj.mode
            # Caller owns the file object; close() must not close it.
            self._extfileobj = True
        self.name = os.path.abspath(name) if name else None
        self.fileobj = fileobj
        # Init datastructures
        self.closed = False
        self.members = []       # list of members as TarInfo objects
        self._loaded = False    # flag if all members have been read
        self.offset = self.fileobj.tell()
        # current position in the archive file
        self.inodes = {}        # dictionary caching the inodes of
        # archive members already added
        if self._mode == "r":
            self.firstmember = None
            self.firstmember = self.next()
        if self._mode == "a":
            # Move to the end of the archive,
            # before the first empty block.
            self.firstmember = None
            while True:
                try:
                    tarinfo = self.next()
                except ReadError:
                    self.fileobj.seek(0)
                    break
                if tarinfo is None:
                    # Back up one block so new members overwrite the
                    # end-of-archive marker.
                    self.fileobj.seek(- BLOCKSIZE, 1)
                    break
        if self._mode in "aw":
            self._loaded = True
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=20*512):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.
    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:' open for reading exclusively uncompressed
    'r:gz' open for reading with gzip compression
    'r:bz2' open for reading with bzip2 compression
    'a' or 'a:' open for appending
    'w' or 'w:' open for writing without compression
    'w:gz' open for writing with gzip compression
    'w:bz2' open for writing with bzip2 compression
    'r|*' open a stream of tar blocks with transparent compression
    'r|' open an uncompressed stream of tar blocks for reading
    'r|gz' open a gzip compressed stream of tar blocks
    'r|bz2' open a bzip2 compressed stream of tar blocks
    'w|' open an uncompressed stream for writing
    'w|gz' open a gzip compressed stream for writing
    'w|bz2' open a bzip2 compressed stream for writing
    """
    if not name and not fileobj:
        raise ValueError("nothing to open")
    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                # Remember the position so we can rewind after a
                # failed attempt and retry with the next compression.
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj)
            except (ReadError, CompressionError):
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")
    elif ":" in mode:
        # Seekable archive with explicit compression, e.g. "w:gz".
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"
        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj)
    elif "|" in mode:
        # Non-seekable stream of tar blocks, e.g. "r|gz".
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"
        if filemode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")
        t = cls(name, filemode,
                _Stream(name, filemode, comptype, fileobj, bufsize))
        t._extfileobj = False
        return t
    elif mode in "aw":
        return cls.taropen(name, mode, fileobj)
    raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None):
    """Open uncompressed tar archive name for reading or writing.
    """
    if len(mode) > 1 or mode not in "raw":
        raise ValueError("mode must be 'r', 'a' or 'w'")
    return cls(name, mode, fileobj)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9):
    """Open gzip compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'")
    try:
        import gzip
        gzip.GzipFile  # probe that the module is functional, not a stub
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")
    fileobj = gzip.GzipFile(name, mode, compresslevel, fileobj)
    try:
        t = cls.taropen(name, mode, fileobj)
    except IOError:
        fileobj.close()
        raise ReadError("not a gzip file")
    t._extfileobj = False
    return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9):
    """Open bzip2 compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if len(mode) > 1 or mode not in "rw":
        raise ValueError("mode must be 'r' or 'w'.")
    try:
        import bz2
    except ImportError:
        raise CompressionError("bz2 module is not available")
    if fileobj is not None:
        # BZ2File cannot wrap an existing file object directly; proxy it.
        fileobj = _BZ2Proxy(fileobj, mode)
        extfileobj = True
    else:
        fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
        extfileobj = False
    try:
        t = cls.taropen(name, mode, fileobj)
    except IOError:
        if not extfileobj:
            fileobj.close()
        raise ReadError("not a bzip2 file")
    t._extfileobj = False
    return t
# All *open() methods are registered here.
OPEN_METH = {
    "tar": "taropen",   # uncompressed tar
    "gz":  "gzopen",    # gzip compressed tar
    "bz2": "bz2open"    # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
    appended to the archive.
    """
    if self.closed:
        return
    if self._mode in "aw":
        # End-of-archive marker: two all-zero blocks ...
        self.fileobj.write(NUL * (BLOCKSIZE * 2))
        self.offset += (BLOCKSIZE * 2)
        # ... then fill up the end with zero-blocks
        # (like option -b20 for tar does)
        leftover = self.offset % RECORDSIZE
        if leftover > 0:
            self.fileobj.write(NUL * (RECORDSIZE - leftover))
    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
    found in the archive, KeyError is raised. If a member occurs more
    than once in the archive, its last occurence is assumed to be the
    most up-to-date version.
    """
    member = self._getmember(name)
    if member is None:
        raise KeyError("filename %r not found" % name)
    return member
def getmembers(self):
    """Return the members of the archive as a list of TarInfo objects. The
    list has the same order as the members in the archive.
    """
    self._check()
    if not self._loaded:
        # A complete listing requires scanning the whole archive once.
        self._load()
    return self.members
def getnames(self):
    """Return the members of the archive as a list of their names. It has
    the same order as the list returned by getmembers().
    """
    return [member.name for member in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
    """Create a TarInfo object for either the file `name' or the file
    object `fileobj' (using os.fstat on its file descriptor). You can
    modify some of the TarInfo's attributes before you add it using
    addfile(). If given, `arcname' specifies an alternative name for the
    file in the archive.
    """
    self._check("aw")
    # When fileobj is given, replace name by
    # fileobj's real name.
    if fileobj is not None:
        name = fileobj.name
    # Building the name of the member in the archive.
    # Backward slashes are converted to forward slashes,
    # Absolute paths are turned to relative paths.
    if arcname is None:
        arcname = name
    arcname = normpath(arcname)
    drv, arcname = os.path.splitdrive(arcname)
    while arcname[0:1] == "/":
        arcname = arcname[1:]
    # Now, fill the TarInfo object with
    # information specific for the file.
    tarinfo = TarInfo()
    # Use os.stat or os.lstat, depending on platform
    # and if symlinks shall be resolved.
    if fileobj is None:
        if hasattr(os, "lstat") and not self.dereference:
            statres = os.lstat(name)
        else:
            statres = os.stat(name)
    elif hasattr(os, 'fstat'):
        statres = os.fstat(fileobj.fileno())
    else:
        raise NotImplementedError('fileobj argument not supported on this '
                                  'platform (no os.fstat)')
    linkname = ""
    stmd = statres.st_mode
    if stat.S_ISREG(stmd):
        inode = (statres.st_ino, statres.st_dev)
        if not self.dereference and \
                statres.st_nlink > 1 and inode in self.inodes:
            # Is it a hardlink to an already
            # archived file?
            type = LNKTYPE
            linkname = self.inodes[inode]
        else:
            # The inode is added only if its valid.
            # For win32 it is always 0.
            type = REGTYPE
            if inode[0]:
                self.inodes[inode] = arcname
    elif stat.S_ISDIR(stmd):
        type = DIRTYPE
        if arcname[-1:] != "/":
            arcname += "/"
    elif stat.S_ISFIFO(stmd):
        type = FIFOTYPE
    elif stat.S_ISLNK(stmd):
        type = SYMTYPE
        linkname = os.readlink(name)
    elif stat.S_ISCHR(stmd):
        type = CHRTYPE
    elif stat.S_ISBLK(stmd):
        type = BLKTYPE
    else:
        # Sockets and other exotic file types cannot be archived.
        return None
    # Fill the TarInfo object with all
    # information we can get.
    tarinfo.name = arcname
    tarinfo.mode = stmd
    tarinfo.uid = statres.st_uid
    tarinfo.gid = statres.st_gid
    if stat.S_ISREG(stmd):
        tarinfo.size = statres.st_size
    else:
        # Only regular files carry data blocks in the archive.
        tarinfo.size = 0L
    tarinfo.mtime = statres.st_mtime
    tarinfo.type = type
    tarinfo.linkname = linkname
    # Best-effort translation of numeric ids to user/group names;
    # pwd/grp may be unavailable (e.g. win32) or the id unknown.
    if pwd:
        try:
            tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
        except KeyError:
            pass
    if grp:
        try:
            tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
        except KeyError:
            pass
    if type in (CHRTYPE, BLKTYPE):
        if hasattr(os, "major") and hasattr(os, "minor"):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
    the names of the members are printed. If it is True, an `ls -l'-like
    output is produced.
    """
    self._check()
    for tarinfo in self:
        if verbose:
            print filemode(tarinfo.mode),
            print "%s/%s" % (tarinfo.uname or tarinfo.uid,
                             tarinfo.gname or tarinfo.gid),
            if tarinfo.ischr() or tarinfo.isblk():
                # Devices show major,minor instead of a byte size.
                print "%10s" % ("%d,%d" \
                                % (tarinfo.devmajor, tarinfo.devminor)),
            else:
                print "%10d" % tarinfo.size,
            print "%d-%02d-%02d %02d:%02d:%02d" \
                  % time.localtime(tarinfo.mtime)[:6],
        print tarinfo.name,
        if verbose:
            if tarinfo.issym():
                print "->", tarinfo.linkname,
            if tarinfo.islnk():
                print "link to", tarinfo.linkname,
        print
def add(self, name, arcname=None, recursive=True):
    """Add the file `name' to the archive. `name' may be any type of file
    (directory, fifo, symbolic link, etc.). If given, `arcname'
    specifies an alternative name for the file in the archive.
    Directories are added recursively by default. This can be avoided by
    setting `recursive' to False.
    """
    self._check("aw")
    if arcname is None:
        arcname = name
    # Skip if somebody tries to archive the archive...
    if self.name is not None and os.path.abspath(name) == self.name:
        self._dbg(2, "tarfile: Skipped %r" % name)
        return
    # Special case: The user wants to add the current
    # working directory.
    if name == ".":
        if recursive:
            if arcname == ".":
                arcname = ""
            for f in os.listdir("."):
                self.add(f, os.path.join(arcname, f))
        return
    self._dbg(1, name)
    # Create a TarInfo object from the file.
    tarinfo = self.gettarinfo(name, arcname)
    if tarinfo is None:
        # gettarinfo() returns None for unarchivable types (e.g. sockets).
        self._dbg(1, "tarfile: Unsupported type %r" % name)
        return
    # Append the tar header and data to the archive.
    if tarinfo.isreg():
        f = file(name, "rb")
        self.addfile(tarinfo, f)
        f.close()
    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f))
    else:
        # Links, fifos and devices have a header but no data blocks.
        self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
    given, tarinfo.size bytes are read from it and added to the archive.
    You can create TarInfo objects using gettarinfo().
    On Windows platforms, `fileobj' should always be opened with mode
    'rb' to avoid irritation about the file size.
    """
    self._check("aw")
    # Copy so later mutations by the caller don't affect the stored member.
    tarinfo = copy.copy(tarinfo)
    buf = tarinfo.tobuf(self.posix)
    self.fileobj.write(buf)
    self.offset += len(buf)
    # If there's data to follow, append it.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        # Pad the data up to a full block boundary.
        blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
        if remainder > 0:
            self.fileobj.write(NUL * (BLOCKSIZE - remainder))
            blocks += 1
        self.offset += blocks * BLOCKSIZE
    self.members.append(tarinfo)
def extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode, so files can be
            # created inside them even if the archived mode is read-only;
            # the real mode is restored below.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before parents.
    directories.sort(lambda a, b: cmp(a.name, b.name))
    directories.reverse()
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError, e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a TarInfo object. You can
    specify a different directory using `path'.
    """
    self._check("r")
    if isinstance(member, TarInfo):
        tarinfo = member
    else:
        tarinfo = self.getmember(member)
    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)
    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
    except EnvironmentError, e:
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError, e:
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may be
    a filename or a TarInfo object. If `member' is a regular file, a
    file-like object is returned. If `member' is a link, a file-like
    object is constructed from the link's target. If `member' is none of
    the above, None is returned.
    The file-like object is read-only and provides the following
    methods: read(), readline(), readlines(), seek() and tell()
    """
    self._check("r")
    if isinstance(member, TarInfo):
        tarinfo = member
    else:
        tarinfo = self.getmember(member)
    if tarinfo.isreg():
        return self.fileobject(self, tarinfo)
    elif tarinfo.type not in SUPPORTED_TYPES:
        # If a member's type is unknown, it is treated as a
        # regular file.
        return self.fileobject(self, tarinfo)
    elif tarinfo.islnk() or tarinfo.issym():
        if isinstance(self.fileobj, _Stream):
            # A small but ugly workaround for the case that someone tries
            # to extract a (sym)link as a file-object from a non-seekable
            # stream of tar blocks.
            raise StreamError("cannot extract (sym)link as file object")
        else:
            # A (sym)link's file object is its target's file object.
            return self.extractfile(self._getmember(tarinfo.linkname,
                                                    tarinfo))
    else:
        # If there's no data associated with the member (directory, chrdev,
        # blkdev, etc.), return None instead of a file object.
        return None
def _extract_member(self, tarinfo, targetpath):
    """Extract the TarInfo object tarinfo to a physical
    file called targetpath.
    """
    # Fetch the TarInfo object for the given name
    # and build the destination pathname, replacing
    # forward slashes to platform specific separators.
    if targetpath[-1:] == "/":
        targetpath = targetpath[:-1]
    targetpath = os.path.normpath(targetpath)
    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        # Create directories that are not part of the archive with
        # default permissions.
        os.makedirs(upperdirs)
    if tarinfo.islnk() or tarinfo.issym():
        self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
    else:
        self._dbg(1, tarinfo.name)
    # Dispatch to the type-specific make*() method; subclasses may
    # override these individually.
    if tarinfo.isreg():
        self.makefile(tarinfo, targetpath)
    elif tarinfo.isdir():
        self.makedir(tarinfo, targetpath)
    elif tarinfo.isfifo():
        self.makefifo(tarinfo, targetpath)
    elif tarinfo.ischr() or tarinfo.isblk():
        self.makedev(tarinfo, targetpath)
    elif tarinfo.islnk() or tarinfo.issym():
        self.makelink(tarinfo, targetpath)
    elif tarinfo.type not in SUPPORTED_TYPES:
        self.makeunknown(tarinfo, targetpath)
    else:
        self.makefile(tarinfo, targetpath)
    # Restore metadata; symlinks get no chmod/utime (would follow target).
    self.chown(tarinfo, targetpath)
    if not tarinfo.issym():
        self.chmod(tarinfo, targetpath)
        self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
    """Make a directory called targetpath.
    """
    try:
        # Use a safe mode for the directory, the real mode is set
        # later in _extract_member().
        os.mkdir(targetpath, 0700)
    except EnvironmentError, e:
        # An already-existing directory is fine; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
def makefile(self, tarinfo, targetpath):
    """Make a file called targetpath.

    The member's data is copied out of the archive into the new file.
    Both file objects are closed even if copyfileobj() raises; the
    previous version leaked both handles on a copy error.
    """
    source = self.extractfile(tarinfo)
    try:
        target = file(targetpath, "wb")
        try:
            copyfileobj(source, target)
        finally:
            target.close()
    finally:
        source.close()
def makeunknown(self, tarinfo, targetpath):
    """Make a file from a TarInfo object with an unknown type
    at targetpath.
    """
    self.makefile(tarinfo, targetpath)
    self._dbg(1, "tarfile: Unknown file type %r, " \
              "extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
    """Make a fifo called targetpath.
    """
    if hasattr(os, "mkfifo"):
        os.mkfifo(targetpath)
    else:
        raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
    """Make a character or block device called targetpath.
    """
    if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
        raise ExtractError("special devices not supported by system")
    mode = tarinfo.mode
    # Add the device-type bit to the permission bits.
    if tarinfo.isblk():
        mode |= stat.S_IFBLK
    else:
        mode |= stat.S_IFCHR
    os.mknod(targetpath, mode,
             os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
    """Make a (symbolic) link called targetpath. If it cannot be created
    (platform limitation), we try to make a copy of the referenced file
    instead of a link.
    """
    linkpath = tarinfo.linkname
    try:
        if tarinfo.issym():
            os.symlink(linkpath, targetpath)
        else:
            # See extract().
            os.link(tarinfo._link_target, targetpath)
    except AttributeError:
        # os.symlink/os.link missing on this platform: fall back to
        # extracting or copying the link target instead.
        if tarinfo.issym():
            linkpath = os.path.join(os.path.dirname(tarinfo.name),
                                    linkpath)
            linkpath = normpath(linkpath)
        try:
            self._extract_member(self.getmember(linkpath), targetpath)
        except (EnvironmentError, KeyError), e:
            linkpath = os.path.normpath(linkpath)
            try:
                shutil.copy2(linkpath, targetpath)
            except EnvironmentError, e:
                raise IOError("link could not be created")
def chown(self, tarinfo, targetpath):
    """Set owner of targetpath according to tarinfo.
    """
    if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
        # We have to be root to do so.
        # Prefer the symbolic names from the archive; fall back to the
        # numeric ids, then to the current user/group.
        try:
            g = grp.getgrnam(tarinfo.gname)[2]
        except KeyError:
            try:
                g = grp.getgrgid(tarinfo.gid)[2]
            except KeyError:
                g = os.getgid()
        try:
            u = pwd.getpwnam(tarinfo.uname)[2]
        except KeyError:
            try:
                u = pwd.getpwuid(tarinfo.uid)[2]
            except KeyError:
                u = os.getuid()
        try:
            # lchown avoids following symlinks where available.
            if tarinfo.issym() and hasattr(os, "lchown"):
                os.lchown(targetpath, u, g)
            else:
                if sys.platform != "os2emx":
                    os.chown(targetpath, u, g)
        except EnvironmentError, e:
            raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
    """Set file permissions of targetpath according to tarinfo.
    """
    if hasattr(os, 'chmod'):
        try:
            os.chmod(targetpath, tarinfo.mode)
        except EnvironmentError, e:
            raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
    """Set modification time of targetpath according to tarinfo.
    """
    if not hasattr(os, 'utime'):
        return
    if sys.platform == "win32" and tarinfo.isdir():
        # According to msdn.microsoft.com, it is an error (EACCES)
        # to use utime() on directories.
        return
    try:
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError, e:
        raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
    """Return the next member of the archive as a TarInfo object, when
    TarFile is opened for reading. Return None if there is no more
    available.
    """
    self._check("ra")
    if self.firstmember is not None:
        # __init__ pre-reads one member; hand it out exactly once.
        m = self.firstmember
        self.firstmember = None
        return m
    # Read the next block.
    self.fileobj.seek(self.offset)
    while True:
        buf = self.fileobj.read(BLOCKSIZE)
        if not buf:
            return None
        try:
            tarinfo = TarInfo.frombuf(buf)
            # Set the TarInfo object's offset to the current position of the
            # TarFile and set self.offset to the position where the data blocks
            # should begin.
            tarinfo.offset = self.offset
            self.offset += BLOCKSIZE
            tarinfo = self.proc_member(tarinfo)
        except ValueError, e:
            if self.ignore_zeros:
                # Skip the bad block and keep scanning.
                self._dbg(2, "0x%X: empty or invalid block: %s" %
                          (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            else:
                if self.offset == 0:
                    # Nothing valid at the very start: not a tar at all.
                    raise ReadError("empty, unreadable or compressed "
                                    "file: %s" % e)
                return None
        break
    # Some old tar programs represent a directory as a regular
    # file with a trailing slash.
    if tarinfo.isreg() and tarinfo.name.endswith("/"):
        tarinfo.type = DIRTYPE
    # Directory names should have a '/' at the end.
    if tarinfo.isdir() and not tarinfo.name.endswith("/"):
        tarinfo.name += "/"
    self.members.append(tarinfo)
    return tarinfo
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is proc_member() which is called with a TarInfo
# object created from the header block from the current offset. The
# proc_member() method can be overridden in a subclass to add custom
# proc_*() methods. A proc_*() method MUST implement the following
# operations:
# 1. Set tarinfo.offset_data to the position where the data blocks begin,
#    if there is data that follows.
# 2. Set self.offset to the position where the next member's header will
#    begin.
# 3. Return tarinfo or another valid TarInfo object.
def proc_member(self, tarinfo):
    """Choose the right processing method for tarinfo depending
    on its type and call it.
    """
    if tarinfo.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
        return self.proc_gnulong(tarinfo)
    elif tarinfo.type == GNUTYPE_SPARSE:
        return self.proc_sparse(tarinfo)
    else:
        return self.proc_builtin(tarinfo)
def proc_builtin(self, tarinfo):
    """Process a builtin type member or an unknown member
    which will be treated as a regular file.
    """
    tarinfo.offset_data = self.offset
    if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
        # Skip the following data blocks.
        self.offset += self._block(tarinfo.size)
    return tarinfo
def proc_gnulong(self, tarinfo):
    """Process the blocks that hold a GNU longname
    or longlink member.
    """
    # Collect the NUL-terminated name stored in the data blocks.
    buf = ""
    count = tarinfo.size
    while count > 0:
        block = self.fileobj.read(BLOCKSIZE)
        buf += block
        self.offset += BLOCKSIZE
        count -= BLOCKSIZE
    # Fetch the next header and process it.
    b = self.fileobj.read(BLOCKSIZE)
    t = TarInfo.frombuf(b)
    t.offset = self.offset
    self.offset += BLOCKSIZE
    next = self.proc_member(t)
    # Patch the TarInfo object from the next header with
    # the longname information.
    next.offset = tarinfo.offset
    if tarinfo.type == GNUTYPE_LONGNAME:
        next.name = nts(buf)
    elif tarinfo.type == GNUTYPE_LONGLINK:
        next.linkname = nts(buf)
    return next
def proc_sparse(self, tarinfo):
    """Process a GNU sparse header plus extra headers.
    """
    buf = tarinfo.buf
    sp = _ringbuffer()
    pos = 386           # offset of the first sparse struct in the header
    lastpos = 0L
    realpos = 0L
    # There are 4 possible sparse structs in the
    # first header.
    for i in xrange(4):
        try:
            offset = nti(buf[pos:pos + 12])
            numbytes = nti(buf[pos + 12:pos + 24])
        except ValueError:
            break
        # Any gap between the previous data section and this one is a hole.
        if offset > lastpos:
            sp.append(_hole(lastpos, offset - lastpos))
        sp.append(_data(offset, numbytes, realpos))
        realpos += numbytes
        lastpos = offset + numbytes
        pos += 24
    isextended = ord(buf[482])
    origsize = nti(buf[483:495])
    # If the isextended flag is given,
    # there are extra headers to process.
    while isextended == 1:
        buf = self.fileobj.read(BLOCKSIZE)
        self.offset += BLOCKSIZE
        pos = 0
        # An extension block holds up to 21 sparse structs.
        for i in xrange(21):
            try:
                offset = nti(buf[pos:pos + 12])
                numbytes = nti(buf[pos + 12:pos + 24])
            except ValueError:
                break
            if offset > lastpos:
                sp.append(_hole(lastpos, offset - lastpos))
            sp.append(_data(offset, numbytes, realpos))
            realpos += numbytes
            lastpos = offset + numbytes
            pos += 24
        isextended = ord(buf[504])
    # Trailing hole up to the file's original (logical) size.
    if lastpos < origsize:
        sp.append(_hole(lastpos, origsize - lastpos))
    tarinfo.sparse = sp
    tarinfo.offset_data = self.offset
    self.offset += self._block(tarinfo.size)
    # Report the logical size, not the (smaller) archived size.
    tarinfo.size = origsize
    return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _block(self, count):
    """Round up a byte count by BLOCKSIZE and return it,
    e.g. _block(834) => 1024.
    """
    full, extra = divmod(count, BLOCKSIZE)
    if extra:
        full += 1
    return full * BLOCKSIZE
def _getmember(self, name, tarinfo=None):
    """Find an archive member by name from bottom to top.
    If tarinfo is given, it is used as the starting point.
    """
    # Ensure that all members have been loaded.
    members = self.getmembers()
    end = len(members) if tarinfo is None else members.index(tarinfo)
    # Walk backwards so the most recent occurrence of a name wins.
    for member in reversed(members[:end]):
        if member.name == name:
            return member
def _load(self):
    """Read through the entire archive file and look for readable
    members.
    """
    while self.next() is not None:
        pass
    self._loaded = True
def _check(self, mode=None):
    """Check if TarFile is still open, and if the operation's mode
    corresponds to TarFile's mode.
    """
    if self.closed:
        raise IOError("%s is closed" % self.__class__.__name__)
    if mode is not None and self._mode not in mode:
        raise IOError("bad operation for mode %r" % self._mode)
def __iter__(self):
    """Provide an iterator object.
    """
    # Iterate the cached list when complete, otherwise scan lazily.
    return iter(self.members) if self._loaded else TarIter(self)
def _dbg(self, level, msg):
    """Write debugging output to sys.stderr.
    """
    if level <= self.debug:
        print >> sys.stderr, msg
# class TarFile
class TarIter:
    """Iterator Class.
    for tarinfo in TarFile(...):
    suite...
    """
    def __init__(self, tarfile):
        """Construct a TarIter object.
        """
        self.tarfile = tarfile
        self.index = 0      # position within tarfile.members
    def __iter__(self):
        """Return iterator object.
        """
        return self
    def next(self):
        """Return the next item using TarFile's next() method.
        When all members have been read, set TarFile as _loaded.
        """
        # Fix for SF #1100429: Under rare circumstances it can
        # happen that getmembers() is called during iteration,
        # which will cause TarIter to stop prematurely.
        if not self.tarfile._loaded:
            # Archive not fully scanned yet: advance the file reader.
            tarinfo = self.tarfile.next()
            if not tarinfo:
                self.tarfile._loaded = True
                raise StopIteration
        else:
            # Fully loaded: serve members from the cached list.
            try:
                tarinfo = self.tarfile.members[self.index]
            except IndexError:
                raise StopIteration
        self.index += 1
        return tarinfo
# Helper classes for sparse file support
class _section:
    """Base class for _data and _hole.
    """
    def __init__(self, offset, size):
        self.offset = offset    # start of the section in the logical file
        self.size = size        # length of the section in bytes
    def __contains__(self, offset):
        # True if `offset' lies in [self.offset, self.offset + self.size).
        return self.offset <= offset < self.offset + self.size
class _data(_section):
    """Represent a data section in a sparse file.
    """
    def __init__(self, offset, size, realpos):
        _section.__init__(self, offset, size)
        self.realpos = realpos  # where this section's bytes sit in the archive
class _hole(_section):
    """Represent a hole section in a sparse file.
    """
    # Holes carry no archive data; only offset/size from _section are needed.
    pass
class _ringbuffer(list):
    """Ringbuffer class which increases performance
    over a regular list.
    """
    def __init__(self):
        self.idx = 0    # index of the section found by the last find()
    def find(self, offset):
        # Start at the last hit and wrap around once; sparse-file reads
        # are mostly sequential, so the match is usually nearby.
        idx = self.idx
        while True:
            item = self[idx]
            if offset in item:
                break
            idx += 1
            if idx == len(self):
                idx = 0
            if idx == self.idx:
                # End of File
                return None
        self.idx = idx
        return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0           # zipfile.ZIP_STORED
TAR_GZIPPED = 8         # zipfile.ZIP_DEFLATED
class TarFileCompat:
    """TarFile class compatible with standard module zipfile's
    ZipFile class.
    """
    def __init__(self, file, mode="r", compression=TAR_PLAIN):
        if compression == TAR_PLAIN:
            self.tarfile = TarFile.taropen(file, mode)
        elif compression == TAR_GZIPPED:
            self.tarfile = TarFile.gzopen(file, mode)
        else:
            raise ValueError("unknown compression constant")
        if mode[0:1] == "r":
            # Mirror each member's attributes under zipfile's names.
            members = self.tarfile.getmembers()
            for m in members:
                m.filename = m.name
                m.file_size = m.size
                m.date_time = time.gmtime(m.mtime)[:6]
    def namelist(self):
        return map(lambda m: m.name, self.infolist())
    def infolist(self):
        # Only regular files, matching zipfile's notion of an entry.
        return filter(lambda m: m.type in REGULAR_TYPES,
                      self.tarfile.getmembers())
    def printdir(self):
        self.tarfile.list()
    def testzip(self):
        # tar archives have no CRC to verify; nothing to do.
        return
    def getinfo(self, name):
        return self.tarfile.getmember(name)
    def read(self, name):
        return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
    def write(self, filename, arcname=None, compress_type=None):
        self.tarfile.add(filename, arcname)
    def writestr(self, zinfo, bytes):
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        import calendar
        # Translate zipfile-style attributes back to TarInfo attributes.
        zinfo.name = zinfo.filename
        zinfo.size = zinfo.file_size
        zinfo.mtime = calendar.timegm(zinfo.date_time)
        self.tarfile.addfile(zinfo, StringIO(bytes))
    def close(self):
        self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
    are able to handle, else return False.
    """
    # Opening probes every registered compression method; any TarError
    # means the file is not something this module can read.
    try:
        archive = open(name)
        archive.close()
        return True
    except TarError:
        return False
open = TarFile.open
|
apache-2.0
|
takeflight/django
|
django/conf/urls/i18n.py
|
113
|
1193
|
import warnings
from django.conf import settings
from django.conf.urls import patterns, url
from django.core.urlresolvers import LocaleRegexURLResolver
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.views.i18n import set_language
def i18n_patterns(prefix, *args):
    """
    Adds the language code prefix to every URL pattern within this
    function. This may only be used in the root URLconf, not in an included
    URLconf.
    """
    if not isinstance(prefix, six.string_types):
        # Modern calling convention: a list of url() instances.
        pattern_list = [prefix] + list(args)
    else:
        # Legacy convention: a prefix string plus pattern tuples.
        warnings.warn(
            "Calling i18n_patterns() with the `prefix` argument and with tuples "
            "instead of django.conf.urls.url() instances is deprecated and "
            "will no longer work in Django 2.0. Use a list of "
            "django.conf.urls.url() instances instead.",
            RemovedInDjango20Warning, stacklevel=2
        )
        pattern_list = patterns(prefix, *args)
    if settings.USE_I18N:
        return [LocaleRegexURLResolver(pattern_list)]
    return pattern_list
# The single URL this URLconf exposes: the language-switching view.
urlpatterns = [
    url(r'^setlang/$', set_language, name='set_language'),
]
|
bsd-3-clause
|
lucasgr7/silverplate
|
crawler/tests/test_crawler.py
|
1
|
2392
|
from django.test import TestCase
from crawler.engine import LinkFinder, IngredientFinder
import urllib.request
from ..models import DataIngredient
class CrawlerTestCase(TestCase):
    """Tests for LinkFinder and IngredientFinder.

    NOTE(review): several tests below fetch live pages from
    comidaereceitas.com.br over the network, so they are slow and will
    break if the site changes -- consider recording fixtures.
    """
    def test_links_finder_count(self):
        """Test the count of links in link finder is equal to the expected amount"""
        finder = LinkFinder()
        html = '<html><a href="http://comidaereceitas.com/teste">Link 01</a><span>Span no meio</span><a href="' \
               'http://comidaereceitas.com/teste2">Link 02</a></html>'
        finder.feed(html)
        self.assertEqual(2, len(finder.links))
    def test_push_sem_duplicates(self):
        """Test if the method in link finder (push) do not let have duplicate values for the links found"""
        finder = LinkFinder()
        finder.push('goku')
        finder.push('vegeta')
        finder.push('goku')
        # 'goku' pushed twice must be stored once.
        self.assertEqual(2, len(finder.links))
    def test_ingredients_found(self):
        """Evaluate if the quantity of ingredients found on in a page is equal to the real amount expected"""
        finder = IngredientFinder()
        link = 'https://www.comidaereceitas.com.br/bolos/bolo-felpudo-de-coco.html'
        response = urllib.request.urlopen(link)
        html = response.read().decode('utf-8')
        finder.feed(html)
        self.assertEqual(12, finder.ingredientes, 'should be {}, is {}, stores in db {}'.format(
            12,
            finder.ingredientes,
            str(DataIngredient.objects.all())
        ))
    def test_way_cooking_found(self):
        """Evaluate if the quantity of way of cooking found on in a page is equal to the real amount expected"""
        finder = IngredientFinder()
        link = 'https://www.comidaereceitas.com.br/bolos/bolo-felpudo-de-coco.html'
        response = urllib.request.urlopen(link)
        html = response.read().decode('utf-8')
        finder.feed(html)
        # 8 preparation steps expected on this recipe page.
        self.assertEqual(8, finder.passos)
    def test_filter_only_recipe(self):
        """Evaluate if the Data Finder only download data from pages evaulated as real recipes and not info pages"""
        finder = IngredientFinder()
        link = 'https://www.comidaereceitas.com.br/informacoes/politica-de-privacidade.html'
        response = urllib.request.urlopen(link)
        html = response.read().decode('utf-8')
        finder.feed(html)
        # A non-recipe info page must yield zero ingredients.
        self.assertEqual(0, finder.ingredientes)
|
mit
|
hackersql/sq1map
|
plugins/dbms/maxdb/takeover.py
|
3
|
1050
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.exception import SqlmapUnsupportedFeatureException
from plugins.generic.takeover import Takeover as GenericTakeover
class Takeover(GenericTakeover):
    """SAP MaxDB takeover plugin.

    Every OS-takeover technique is unsupported on SAP MaxDB, so each
    entry point simply raises SqlmapUnsupportedFeatureException.
    """

    def __init__(self):
        GenericTakeover.__init__(self)

    def osCmd(self):
        raise SqlmapUnsupportedFeatureException(
            "on SAP MaxDB it is not possible to execute commands")

    def osShell(self):
        raise SqlmapUnsupportedFeatureException(
            "on SAP MaxDB it is not possible to execute commands")

    def osPwn(self):
        raise SqlmapUnsupportedFeatureException(
            "on SAP MaxDB it is not possible to establish an "
            "out-of-band connection")

    def osSmb(self):
        raise SqlmapUnsupportedFeatureException(
            "on SAP MaxDB it is not possible to establish an "
            "out-of-band connection")
|
gpl-3.0
|
c0defreak/python-for-android
|
python-modules/twisted/twisted/internet/_pollingfile.py
|
59
|
8276
|
# -*- test-case-name: twisted.internet.test.test_pollingfile -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements a simple polling interface for file descriptors that don't work with
select() - this is pretty much only useful on Windows.
"""
from zope.interface import implements
from twisted.internet.interfaces import IConsumer, IPushProducer
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
class _PollableResource:
active = True
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class _PollingTimer:
    # Everything is private here because it is really an implementation detail.
    # Adaptive poll loop: drives checkWork() on all active resources, and
    # shrinks the delay (down to MIN_TIMEOUT) while work is being done,
    # doubling it back up (to MAX_TIMEOUT) while idle.
    def __init__(self, reactor):
        self.reactor = reactor
        self._resources = []
        self._pollTimer = None
        self._currentTimeout = MAX_TIMEOUT
        self._paused = False
    def _addPollableResource(self, res):
        # Register a resource and start polling if it (or any other) is active.
        self._resources.append(res)
        self._checkPollingState()
    def _checkPollingState(self):
        # Poll while at least one resource is active; otherwise stop.
        for resource in self._resources:
            if resource.active:
                self._startPolling()
                break
        else:
            self._stopPolling()
    def _startPolling(self):
        # Idempotent: only schedules if no timer is pending.
        if self._pollTimer is None:
            self._pollTimer = self._reschedule()
    def _stopPolling(self):
        if self._pollTimer is not None:
            self._pollTimer.cancel()
            self._pollTimer = None
    def _pause(self):
        self._paused = True
    def _unpause(self):
        self._paused = False
        self._checkPollingState()
    def _reschedule(self):
        # Returns the new delayed call, or None while paused.
        if not self._paused:
            return self.reactor.callLater(self._currentTimeout, self._pollEvent)
    def _pollEvent(self):
        # One poll tick: run checkWork() on every active resource, then
        # adapt the timeout based on how much work was reported.
        workUnits = 0.
        anyActive = []
        for resource in self._resources:
            if resource.active:
                workUnits += resource.checkWork()
                # Check AFTER work has been done
                if resource.active:
                    anyActive.append(resource)
        newTimeout = self._currentTimeout
        if workUnits:
            # Busy: poll faster, proportionally to the amount of work.
            newTimeout = self._currentTimeout / (workUnits + 1.)
            if newTimeout < MIN_TIMEOUT:
                newTimeout = MIN_TIMEOUT
        else:
            # Idle: back off exponentially.
            newTimeout = self._currentTimeout * 2.
            if newTimeout > MAX_TIMEOUT:
                newTimeout = MAX_TIMEOUT
        self._currentTimeout = newTimeout
        if anyActive:
            self._pollTimer = self._reschedule()
# If we ever (let's hope not) need the above functionality on UNIX, this could
# be factored into a different module.
import win32pipe
import win32file
import win32api
import pywintypes
class _PollableReadPipe(_PollableResource):
    # Poll-driven reader for the read end of a Windows pipe, acting as an
    # IPushProducer: data read on each poll tick is delivered through
    # receivedCallback; pipe loss triggers lostCallback.

    implements(IPushProducer)

    def __init__(self, pipe, receivedCallback, lostCallback):
        # security attributes for pipes
        self.pipe = pipe
        self.receivedCallback = receivedCallback
        self.lostCallback = lostCallback

    def checkWork(self):
        """Drain everything currently available from the pipe.

        Returns the number of bytes read (the poll loop's "work units").
        A win32api.error from Peek/Read is treated as the pipe having
        been lost, and triggers cleanup().
        """
        finished = 0
        fullDataRead = []

        while 1:
            try:
                # Peek first so the subsequent ReadFile never blocks.
                buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
                # finished = (result == -1)
                if not bytesToRead:
                    break
                hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
                fullDataRead.append(data)
            except win32api.error:
                finished = 1
                break

        dataBuf = ''.join(fullDataRead)
        if dataBuf:
            self.receivedCallback(dataBuf)
        if finished:
            self.cleanup()
        return len(dataBuf)

    def cleanup(self):
        # Stop polling this pipe, then notify the owner.
        self.deactivate()
        self.lostCallback()

    def close(self):
        try:
            win32api.CloseHandle(self.pipe)
        except pywintypes.error:
            # You can't close std handles...?
            pass

    def stopProducing(self):
        self.close()

    def pauseProducing(self):
        self.deactivate()

    def resumeProducing(self):
        self.activate()
# Bytes queued in _PollableWritePipe before write() asks its producer to pause.
FULL_BUFFER_SIZE = 64 * 1024
class _PollableWritePipe(_PollableResource):
    # Poll-driven writer for the write end of a Windows pipe, acting as an
    # IConsumer: data queued via write() is flushed by checkWork(), and a
    # registered producer is paused/resumed around buffer pressure.

    implements(IConsumer)

    def __init__(self, writePipe, lostCallback):
        self.disconnecting = False
        self.producer = None
        self.producerPaused = 0
        self.streamingProducer = 0
        self.outQueue = []
        self.writePipe = writePipe
        self.lostCallback = lostCallback
        try:
            # Put the pipe in non-blocking mode so WriteFile in checkWork()
            # never stalls the poll loop.
            win32pipe.SetNamedPipeHandleState(writePipe,
                                              win32pipe.PIPE_NOWAIT,
                                              None,
                                              None)
        except pywintypes.error:
            # Maybe it's an invalid handle.  Who knows.
            pass

    def close(self):
        # Flush-then-close: checkWork() actually closes once outQueue drains.
        self.disconnecting = True

    def bufferFull(self):
        if self.producer is not None:
            self.producerPaused = 1
            self.producer.pauseProducing()

    def bufferEmpty(self):
        if self.producer is not None and ((not self.streamingProducer) or
                                          self.producerPaused):
            # BUG FIX: clear *this consumer's* paused flag.  The previous
            # code did ``self.producer.producerPaused = 0``, which left
            # self.producerPaused stuck at 1 and scribbled a stray
            # attribute onto the producer object.
            self.producerPaused = 0
            self.producer.resumeProducing()
            return True
        return False

    # almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh

    def registerProducer(self, producer, streaming):
        """Register to receive data from a producer.

        This sets this selectable to be a consumer for a producer.  When this
        selectable runs out of data on a write() call, it will ask the producer
        to resumeProducing(). A producer should implement the IProducer
        interface.

        FileDescriptor provides some infrastructure for producer methods.
        """
        if self.producer is not None:
            raise RuntimeError(
                "Cannot register producer %s, because producer %s was never "
                "unregistered." % (producer, self.producer))
        if not self.active:
            producer.stopProducing()
        else:
            self.producer = producer
            self.streamingProducer = streaming
            if not streaming:
                producer.resumeProducing()

    def unregisterProducer(self):
        """Stop consuming data from a producer, without disconnecting.
        """
        self.producer = None

    def writeConnectionLost(self):
        self.deactivate()
        try:
            win32api.CloseHandle(self.writePipe)
        except pywintypes.error:
            # OMG what
            pass
        self.lostCallback()

    def writeSequence(self, seq):
        self.outQueue.extend(seq)

    def write(self, data):
        if self.disconnecting:
            return
        self.outQueue.append(data)
        # Apply back-pressure once the queued bytes exceed the threshold.
        if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
            self.bufferFull()

    def checkWork(self):
        """Flush as much of outQueue as the pipe accepts.

        Returns the number of bytes written (the poll loop's "work
        units").  Any win32 write error is treated as connection loss.
        """
        numBytesWritten = 0
        if not self.outQueue:
            if self.disconnecting:
                self.writeConnectionLost()
                return 0
            try:
                # Zero-byte probe: detects a closed pipe while idle.
                win32file.WriteFile(self.writePipe, '', None)
            except pywintypes.error:
                self.writeConnectionLost()
                return numBytesWritten
        while self.outQueue:
            data = self.outQueue.pop(0)
            errCode = 0
            if isinstance(data, unicode):
                raise TypeError("unicode not allowed")
            try:
                errCode, nBytesWritten = win32file.WriteFile(self.writePipe,
                                                             data, None)
            except win32api.error:
                self.writeConnectionLost()
                break
            else:
                # assert not errCode, "wtf an error code???"
                numBytesWritten += nBytesWritten
                if len(data) > nBytesWritten:
                    # Partial write: requeue the unsent tail and stop.
                    self.outQueue.insert(0, data[nBytesWritten:])
                    break
        else:
            resumed = self.bufferEmpty()
            if not resumed and self.disconnecting:
                self.writeConnectionLost()
        return numBytesWritten
|
apache-2.0
|
miptliot/edx-platform
|
openedx/core/djangoapps/user_api/tests/test_middleware.py
|
152
|
4415
|
"""Tests for user API middleware"""
from mock import Mock, patch
from unittest import TestCase
from django.http import HttpResponse
from django.test.client import RequestFactory
from student.tests.factories import UserFactory, AnonymousUserFactory
from ..tests.factories import UserCourseTagFactory
from ..middleware import UserTagsEventContextMiddleware
class TagsMiddlewareTest(TestCase):
    """
    Test the UserTagsEventContextMiddleware
    """
    def setUp(self):
        super(TagsMiddlewareTest, self).setUp()
        self.middleware = UserTagsEventContextMiddleware()
        self.user = UserFactory.create()
        self.other_user = UserFactory.create()
        self.course_id = 'mock/course/id'
        self.request_factory = RequestFactory()
        # TODO: Make it so we can use reverse. Appears to fail depending on the order in which tests are run
        #self.request = RequestFactory().get(reverse('courseware', kwargs={'course_id': self.course_id}))
        self.request = RequestFactory().get('/courses/{}/courseware'.format(self.course_id))
        self.request.user = self.user
        self.response = Mock(spec=HttpResponse)
        # Patch the event tracker for the whole test so assertions can
        # inspect what context the middleware entered/exited.
        patcher = patch('openedx.core.djangoapps.user_api.middleware.tracker')
        self.tracker = patcher.start()
        self.addCleanup(patcher.stop)
    def process_request(self):
        """
        Execute process request using the request, and verify that it returns None
        so that the request continues.
        """
        # Middleware should pass request through
        self.assertEquals(self.middleware.process_request(self.request), None)
    def assertContextSetTo(self, context):
        """Asserts UserTagsEventContextMiddleware.CONTEXT_NAME matches ``context``"""
        self.tracker.get_tracker.return_value.enter_context.assert_called_with(  # pylint: disable=maybe-no-member
            UserTagsEventContextMiddleware.CONTEXT_NAME,
            context
        )
    def test_tag_context(self):
        # Tags for self.user in the current course: these should appear.
        for key, value in (('int_value', 1), ('str_value', "two")):
            UserCourseTagFactory.create(
                course_id=self.course_id,
                user=self.user,
                key=key,
                value=value,
            )
        # Tag for a different user: must be excluded.
        UserCourseTagFactory.create(
            course_id=self.course_id,
            user=self.other_user,
            key="other_user",
            value="other_user_value"
        )
        # Tag for a different course: must be excluded.
        UserCourseTagFactory.create(
            course_id='other/course/id',
            user=self.user,
            key="other_course",
            value="other_course_value"
        )
        self.process_request()
        # Note: tag values come back stringified ('1', not 1).
        self.assertContextSetTo({
            'course_id': self.course_id,
            'course_user_tags': {
                'int_value': '1',
                'str_value': 'two',
            }
        })
    def test_no_tags(self):
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
    def test_not_course_url(self):
        # Non-course URLs get an empty context.
        self.request = self.request_factory.get('/not/a/course/url')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})
    def test_invalid_course_id(self):
        # A malformed course id is treated like a non-course URL.
        self.request = self.request_factory.get('/courses/edX/101/')
        self.request.user = self.user
        self.process_request()
        self.assertContextSetTo({})
    def test_anonymous_user(self):
        # Anonymous users still get the course context, just with no tags.
        self.request.user = AnonymousUserFactory()
        self.process_request()
        self.assertContextSetTo({'course_id': self.course_id, 'course_user_tags': {}})
    def test_remove_context(self):
        get_tracker = self.tracker.get_tracker  # pylint: disable=maybe-no-member
        exit_context = get_tracker.return_value.exit_context
        # The middleware should clean up the context when the request is done
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
        exit_context.assert_called_with(UserTagsEventContextMiddleware.CONTEXT_NAME)
        exit_context.reset_mock()
        # Even if the tracker blows up, the middleware should still return the response
        get_tracker.side_effect = Exception
        self.assertEquals(
            self.middleware.process_response(self.request, self.response),
            self.response
        )
|
agpl-3.0
|
StellarCN/py-stellar-base
|
stellar_sdk/xdr/asset_alpha_num12.py
|
1
|
2052
|
# This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .account_id import AccountID
from .asset_code12 import AssetCode12
__all__ = ["AssetAlphaNum12"]
class AssetAlphaNum12:
    """
    XDR Source Code
    ----------------------------------------------------------------
    struct
    {
        AssetCode12 assetCode;
        AccountID issuer;
    }
    ----------------------------------------------------------------
    """

    def __init__(
        self,
        asset_code: AssetCode12,
        issuer: AccountID,
    ) -> None:
        self.asset_code = asset_code
        self.issuer = issuer

    def pack(self, packer: Packer) -> None:
        """Serialize this struct's fields, in XDR order, into *packer*."""
        self.asset_code.pack(packer)
        self.issuer.pack(packer)

    @classmethod
    def unpack(cls, unpacker: Unpacker) -> "AssetAlphaNum12":
        """Deserialize an AssetAlphaNum12 from *unpacker* (fields in XDR order)."""
        asset_code = AssetCode12.unpack(unpacker)
        issuer = AccountID.unpack(unpacker)
        return cls(
            asset_code=asset_code,
            issuer=issuer,
        )

    def to_xdr_bytes(self) -> bytes:
        """Return the raw XDR encoding of this struct."""
        packer = Packer()
        self.pack(packer)
        return packer.get_buffer()

    @classmethod
    def from_xdr_bytes(cls, xdr: bytes) -> "AssetAlphaNum12":
        """Decode an AssetAlphaNum12 from raw XDR bytes."""
        unpacker = Unpacker(xdr)
        return cls.unpack(unpacker)

    def to_xdr(self) -> str:
        """Return the base64-encoded XDR representation."""
        xdr_bytes = self.to_xdr_bytes()
        return base64.b64encode(xdr_bytes).decode()

    @classmethod
    def from_xdr(cls, xdr: str) -> "AssetAlphaNum12":
        """Decode an AssetAlphaNum12 from a base64 XDR string."""
        xdr_bytes = base64.b64decode(xdr.encode())
        return cls.from_xdr_bytes(xdr_bytes)

    def __eq__(self, other: object):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.asset_code == other.asset_code and self.issuer == other.issuer

    def __str__(self):
        out = [
            f"asset_code={self.asset_code}",
            f"issuer={self.issuer}",
        ]
        # Fix: the previous f-string embedded a *list literal* containing the
        # joined string, rendering as <AssetAlphaNum12 ["asset_code=..., ..."]>
        # with stray quotes; join the fields directly instead.
        return f"<AssetAlphaNum12 [{', '.join(out)}]>"
|
apache-2.0
|
jittat/adm2
|
application/migrations-temp/0032_add_is_offline_to_applicant.py
|
2
|
9317
|
# -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from adm.application.models import *
class Migration:
    """South schema migration: add Applicant.is_offline (boolean, default False)."""

    def forwards(self, orm):
        # Adding field 'Applicant.is_offline'
        db.add_column('application_applicant', 'is_offline', orm['application.applicant:is_offline'])

    def backwards(self, orm):
        # Deleting field 'Applicant.is_offline'
        db.delete_column('application_applicant', 'is_offline')

    # Frozen ORM snapshot used by South to reconstruct the models at
    # migration time.  Auto-generated -- do not edit by hand.
    models = {
        'application.address': {
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'district': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'province': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'road': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'village_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'village_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'application.applicant': {
            'activation_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'doc_submission_method': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'has_logged_in': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'has_related_model': ('IntegerListField', [], {'default': 'None'}),
            'hashed_password': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_offline': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_submitted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'application.applicantaddress': {
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'address'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'contact_address': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'contact_owner'", 'unique': 'True', 'to': "orm['application.Address']"}),
            'home_address': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home_owner'", 'unique': 'True', 'to': "orm['application.Address']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'application.education': {
            'anet': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'education'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'gat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'gat_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'gat_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
            'gpax': ('django.db.models.fields.FloatField', [], {}),
            'has_graduated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pat1': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'pat1_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pat1_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
            'pat3': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'pat3_date': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pat3_score_set'", 'null': 'True', 'to': "orm['application.GPExamDate']"}),
            'school_city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'school_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'school_province': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'uses_gat_score': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
        },
        'application.gpexamdate': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'month_year': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'application.major': {
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '5'})
        },
        'application.majorpreference': {
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'preference'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'majors': ('IntegerListField', [], {})
        },
        'application.passwordrequestlog': {
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'password_request_log'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_request_at': ('django.db.models.fields.DateTimeField', [], {}),
            'num_requested_today': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'application.personalinfo': {
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'personal_info'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'birth_date': ('django.db.models.fields.DateField', [], {}),
            'ethnicity': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'national_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'nationality': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '35'})
        },
        'application.registration': {
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
            'applicant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'registrations'", 'to': "orm['application.Applicant']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
            'registered_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'application.submissioninfo': {
            'applicant': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'submission_info'", 'unique': 'True', 'to': "orm['application.Applicant']"}),
            'applicantion_id': ('django.db.models.fields.AutoField', [], {'unique': 'True', 'primary_key': 'True'}),
            'doc_received_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'doc_reviewed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'doc_reviewed_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'has_been_reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'salt': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['application']
|
agpl-3.0
|
jmcarbo/openerp7
|
openerp/addons/project/company.py
|
55
|
1543
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class res_company(osv.osv):
    """Extend res.company with the unit of measure used for project time."""
    _inherit = 'res.company'
    _columns = {
        'project_time_mode_id': fields.many2one('product.uom', 'Project Time Unit',
            help='This will set the unit of measure used in projects and tasks.\n' \
"If you use the timesheet linked to projects (project_timesheet module), don't " \
                 "forget to setup the right unit of measure in your employees.",
        ),
    }
# Pre-v7 OpenERP style: instantiating the class registers it in the ORM pool.
res_company()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
absoludity/servo
|
tests/wpt/web-platform-tests/annotation-protocol/tools/protocol-server.py
|
71
|
14265
|
# protocol-server
#
# a reference implementation of the Web Annotation Protocol
#
# Developed by Benjamin Young (@bigbulehat) and Shane McCarron (@halindrome).
# Sponsored by Spec-Ops (https://spec-ops.io)
#
# Copyright (c) 2016 Spec-Ops
#
# for license information, see http://www.w3.org/Consortium/Legal/2008/04-testsuite-copyright.html
import os
import sys
here = os.path.abspath(os.path.split(__file__)[0])
repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(repo_root, "tools"))
sys.path.insert(0, os.path.join(repo_root, "tools", "six"))
sys.path.insert(0, os.path.join(repo_root, "tools", "html5lib"))
sys.path.insert(0, os.path.join(repo_root, "tools", "wptserve"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pywebsocket", "src"))
sys.path.insert(0, os.path.join(repo_root, "tools", "py"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pytest"))
sys.path.insert(0, os.path.join(repo_root, "tools", "webdriver"))
import hashlib
import json
import urlparse
import uuid
import wptserve
myprotocol = 'http'
myhost = 'localhost'
port = 8080
doc_root = os.path.join(repo_root, "annotation-protocol", "files", "")
container_path = doc_root + 'annotations/'
URIroot = myprotocol + '://' + myhost + ':{0}'.format(port)
per_page = 10
MEDIA_TYPE = 'application/ld+json; profile="http://www.w3.org/ns/anno.jsonld"'
# Prefer header variants
PREFER_MINIMAL_CONTAINER = "http://www.w3.org/ns/ldp#PreferMinimalContainer"
PREFER_CONTAINED_IRIS = "http://www.w3.org/ns/oa#PreferContainedIRIs"
PREFER_CONTAINED_DESCRIPTIONS = \
"http://www.w3.org/ns/oa#PreferContainedDescriptions"
# dictionary for annotations that we create on the fly
tempAnnotations = {}
def extract_preference(prefer):
    """Extract the parameters from a Prefer header's value.

    Returns a dict mapping each parameter name to the list of
    space-separated tokens in its (possibly quoted) value.  A None or
    empty header yields an empty dict.

    >>> extract_preference('return=representation;include="http://www.w3.org/ns/ldp#PreferMinimalContainer http://www.w3.org/ns/oa#PreferContainedIRIs"')
    {'return': ['representation'], 'include': ['http://www.w3.org/ns/ldp#PreferMinimalContainer', 'http://www.w3.org/ns/oa#PreferContainedIRIs']}
    """
    obj = {}
    if prefer:
        for param in prefer.split(';'):
            if not param.strip():
                # Tolerate empty segments, e.g. a trailing ';'.
                continue
            # Split on the first '=' only: the quoted value may itself
            # contain '=' (e.g. in IRIs).  The old split('=') crashed here.
            key, _, value = param.partition('=')
            obj[key.strip()] = value.strip().strip('"').split(' ')
    return obj
def dump_json(obj):
    """Serialize *obj* as pretty-printed JSON with deterministic key order."""
    return json.dumps(obj, sort_keys=True, indent=4)
def add_cors_headers(resp):
    """Merge the shared CORS headers (annotations/cors.headers) into *resp*."""
    resp.headers.update(
        load_headers_from_file(doc_root + 'annotations/cors.headers'))
def load_headers_from_file(path):
    """Parse "Name: value" lines from *path* into a list of (name, value)
    tuples, skipping blank lines and stripping whitespace around fields."""
    with open(path, 'r') as header_file:
        raw = header_file.read()
    return [tuple(field.strip() for field in line.split(":", 1))
            for line in raw.splitlines() if line]
def annotation_files():
    """Names of all served annotations: .json/.jsonld files on disk in the
    container directory, followed by the in-memory temp annotations."""
    names = [entry for entry in os.listdir(container_path)
             if entry.endswith(('.jsonld', '.json'))]
    names.extend(list(tempAnnotations.keys()))
    return names
def annotation_iris(skip=0):
    """IRIs for one page of annotations, starting at offset *skip*."""
    iris = [URIroot + '/annotations/' + name for name in annotation_files()]
    return iris[skip:][:per_page]
def annotations(skip=0):
    """Load every annotation as a parsed JSON object.

    NOTE(review): *skip* is accepted but never used, unlike
    annotation_iris() which pages its result -- confirm whether paging
    was intended here.
    """
    loaded = []
    for name in annotation_files():
        if name.startswith("temp-"):
            loaded.append(json.loads(tempAnnotations[name]))
        else:
            with open(container_path + name, 'r') as fh:
                loaded.append(json.load(fh))
    return loaded
def total_annotations():
    """Number of annotations currently served (disk files plus temp ones)."""
    return len(annotation_files())
@wptserve.handlers.handler
def collection_get(request, response):
    """Annotation Collection handler. NOTE: This also routes paging requests."""
    # Paginate if requested
    qs = urlparse.parse_qs(request.url_parts.query)
    if 'page' in qs:
        return page(request, response)
    # stub collection
    collection_json = {
        "@context": [
            "http://www.w3.org/ns/anno.jsonld",
            "http://www.w3.org/ns/ldp.jsonld"
        ],
        "id": URIroot + "/annotations/",
        "type": ["BasicContainer", "AnnotationCollection"],
        "total": 0,
        "label": "A Container for Web Annotations",
        "first": URIroot + "/annotations/?page=0"
    }
    # NOTE: relies on Python 2 integer division for the page index.
    last_page = (total_annotations() / per_page) - 1
    collection_json['last'] = URIroot + "/annotations/?page={0}".format(last_page)
    # Default Container format SHOULD be PreferContainedDescriptions
    preference = extract_preference(request.headers.get('Prefer'))
    if 'include' in preference:
        preference = preference['include']
    else:
        preference = None
    collection_json['total'] = total_annotations()
    # Fix: compare the query-string flag with `==`, not `is` -- identity of
    # equal string literals is a CPython interning accident, not a guarantee.
    if (qs.get('iris') and qs.get('iris')[0] == '1') \
            or (preference and PREFER_CONTAINED_IRIS in preference):
        return_iris = True
    else:
        return_iris = False
    # only PreferContainedIRIs has unique content
    if return_iris:
        collection_json['id'] += '?iris=1'
        collection_json['first'] += '&iris=1'
        collection_json['last'] += '&iris=1'
    if preference and PREFER_MINIMAL_CONTAINER not in preference:
        # Embed the first page of contents directly in the container.
        if return_iris:
            collection_json['first'] = annotation_iris()
        else:
            collection_json['first'] = annotations()
    collection_headers_file = doc_root + 'annotations/collection.headers'
    add_cors_headers(response)
    response.headers.update(load_headers_from_file(collection_headers_file))
    # this one's unique per request
    response.headers.set('Content-Location', collection_json['id'])
    return dump_json(collection_json)
@wptserve.handlers.handler
def collection_head(request, response):
    """HEAD on the collection: set status and headers, suppress the body."""
    target_dir = doc_root + request.request_path
    response.status = 200 if os.path.isdir(target_dir) else 404
    add_cors_headers(response)
    for name, value in load_headers_from_file(
            doc_root + 'annotations/collection.headers'):
        response.headers.append(name, value)
    response.content = None
@wptserve.handlers.handler
def collection_options(request, response):
    """OPTIONS on the collection: report the allowed methods via headers."""
    target_dir = doc_root + request.request_path
    response.status = 200 if os.path.isdir(target_dir) else 404
    add_cors_headers(response)
    for name, value in load_headers_from_file(
            doc_root + 'annotations/collection.options.headers'):
        response.headers.append(name, value)
def page(request, response):
    """Render one AnnotationPage of the collection (?page=N), with
    optional IRI-only items when ?iris=1 is given."""
    page_json = {
        "@context": "http://www.w3.org/ns/anno.jsonld",
        "id": URIroot + "/annotations/",
        "type": "AnnotationPage",
        "partOf": {
            "id": URIroot + "/annotations/",
            "total": 42023
        },
        "next": URIroot + "/annotations/",
        "items": [
        ]
    }
    add_cors_headers(response)
    headers_file = doc_root + 'annotations/collection.headers'
    response.headers.update(load_headers_from_file(headers_file))
    qs = urlparse.parse_qs(request.url_parts.query)
    page_num = int(qs.get('page')[0])
    page_json['id'] += '?page={0}'.format(page_num)
    total = total_annotations()
    so_far = (per_page * (page_num+1))
    remaining = total - so_far
    if page_num != 0:
        page_json['prev'] = URIroot + '/annotations/?page={0}'.format(page_num-1)
    page_json['partOf']['total'] = total
    if remaining > per_page:
        page_json['next'] += '?page={0}'.format(page_num+1)
    else:
        del page_json['next']
    # Fix: compare the query-string flag with `==`, not `is` -- identity of
    # equal string literals is a CPython interning accident, not a guarantee.
    if qs.get('iris') and qs.get('iris')[0] == '1':
        page_json['items'] = annotation_iris(so_far)
        page_json['id'] += '&iris=1'
        if 'prev' in page_json:
            page_json['prev'] += '&iris=1'
        if 'next' in page_json:
            page_json['next'] += '&iris=1'
    else:
        page_json['items'] = annotations(so_far)
    return dump_json(page_json)
@wptserve.handlers.handler
def annotation_get(request, response):
    """GET an individual annotation, either from the in-memory temp
    cache (ids created via POST/PUT) or from a file on disk."""
    requested_file = doc_root + request.request_path[1:]
    base = os.path.basename( requested_file )
    headers_file = doc_root + 'annotations/annotation.headers'
    # BUG FIX: use .get() so an unknown "temp-" id falls through to the
    # 404 branch instead of raising an unhandled KeyError (HTTP 500).
    if base.startswith("temp-") and tempAnnotations.get(base):
        response.headers.update(load_headers_from_file(headers_file))
        response.headers.set('Etag', hashlib.sha1(base).hexdigest())
        # BUG FIX: annotation_post/annotation_put store an already
        # serialized JSON string; re-running dump_json() on it returned a
        # double-encoded JSON string literal.  Serve the stored value as-is.
        data = tempAnnotations[base]
        if data != "" :
            response.content = data
            response.status = 200
        else:
            response.content = ""
            response.status = 404
    elif os.path.isfile(requested_file):
        response.headers.update(load_headers_from_file(headers_file))
        # Calculate ETag using Apache httpd's default method (more or less)
        # http://www.askapache.info//2.3/mod/core.html#fileetag
        statinfo = os.stat(requested_file)
        etag = "{0}{1}{2}".format(statinfo.st_ino, statinfo.st_mtime,
                                  statinfo.st_size)
        # obfuscate so we don't leak info; hexdigest for string compatibility
        response.headers.set('Etag', hashlib.sha1(etag).hexdigest())
        with open(requested_file, 'r') as data_file:
            data = data_file.read()
        response.content = data
        response.status = 200
    else:
        response.content = 'Not Found'
        response.status = 404
    add_cors_headers(response)
@wptserve.handlers.handler
def annotation_head(request, response):
    """HEAD for a single annotation: status and headers only."""
    requested_file = doc_root + request.request_path[1:]
    base = os.path.basename(requested_file)
    # NOTE(review): this reuses annotation.options.headers rather than
    # annotation.headers as GET does -- confirm that is intentional.
    headers_file = doc_root + 'annotations/annotation.options.headers'
    # BUG FIX: .get() avoids an unhandled KeyError (HTTP 500) when the
    # temp- id is unknown; such requests now return 404.
    if base.startswith("temp-") and tempAnnotations.get(base):
        response.status = 200
        response.headers.update(load_headers_from_file(headers_file))
    elif os.path.isfile(requested_file):
        response.status = 200
        response.headers.update(load_headers_from_file(headers_file))
    else:
        response.status = 404
    add_cors_headers(response)
@wptserve.handlers.handler
def annotation_options(request, response):
    """OPTIONS preflight for a single annotation."""
    requested_file = doc_root + request.request_path[1:]
    base = os.path.basename(requested_file)
    headers_file = doc_root + 'annotations/annotation.options.headers'
    # BUG FIX: .get() avoids an unhandled KeyError (HTTP 500) when the
    # temp- id is unknown; such requests now return 404.
    if base.startswith("temp-") and tempAnnotations.get(base):
        response.status = 200
        response.headers.update(load_headers_from_file(headers_file))
    elif os.path.isfile(requested_file):
        response.status = 200
        response.headers.update(load_headers_from_file(headers_file))
    else:
        response.status = 404
    add_cors_headers(response)
def create_annotation(body):
    """Parse an incoming annotation body and mint it a temporary local id.

    A client-supplied ``id`` is preserved under the ``canonical`` key.
    """
    # TODO: verify media type is JSON of some kind (at least)
    annotation = json.loads(body)
    # 'temp-' prefix marks ids that live only in the in-memory cache.
    temp_id = "temp-" + str(uuid.uuid4())
    if 'id' in annotation:
        annotation['canonical'] = annotation['id']
    annotation['id'] = URIroot + '/annotations/' + temp_id
    return annotation
@wptserve.handlers.handler
def annotation_post(request, response):
incoming = create_annotation(request.body)
newID = incoming['id']
key = os.path.basename(newID)
print "post:" + newID
print "post:" + key
tempAnnotations[key] = dump_json(incoming)
headers_file = doc_root + 'annotations/annotation.headers'
response.headers.update(load_headers_from_file(headers_file))
response.headers.append('Location', newID)
add_cors_headers(response)
response.content = dump_json(incoming)
response.status = 201
@wptserve.handlers.handler
def annotation_put(request, response):
incoming = create_annotation(request.body)
# remember it in our local cache too
# tempAnnotations[request.request_path[1:]] = dump_jason(incoming)
newID = incoming['id']
key = os.path.basename(newID)
print "put:" + newID
print "put:" + key
tempAnnotations[key] = dump_json(incoming)
headers_file = doc_root + 'annotations/annotation.headers'
response.headers.update(load_headers_from_file(headers_file))
response.headers.append('Location', incoming['id'])
add_cors_headers(response)
response.content = dump_json(incoming)
response.status = 200
@wptserve.handlers.handler
def annotation_delete(request, response):
    """Delete a single annotation: a temp- cache entry or a file on disk."""
    base = os.path.basename(request.request_path[1:])
    requested_file = doc_root + request.request_path[1:]
    add_cors_headers(response)
    headers_file = doc_root + 'annotations/annotation.headers'
    try:
        if base.startswith("temp-"):
            # BUG FIX: an unknown temp- id used to raise an unhandled
            # KeyError (HTTP 500); it is now caught below and reported as
            # 404, matching the file branch.  A falsy cached entry also no
            # longer slips through to a bogus 204 without being deleted.
            del tempAnnotations[base]
        else:
            os.remove(requested_file)
        response.headers.update(load_headers_from_file(headers_file))
        response.status = 204
        response.content = ''
    except (OSError, KeyError):
        response.status = 404
        response.content = 'Not Found'
if __name__ == '__main__':
    # NOTE(review): the container/annotation URIs printed below contain a
    # double slash ("...:{port}//annotations/") because the port format
    # string already ends in '/'; informational output only.
    print 'http://' + myhost + ':{0}/'.format(port)
    print 'container URI is http://' + myhost + ':{0}/'.format(port) + "/annotations/"
    print 'example annotation URI is http://' + myhost + ':{0}/'.format(port) + "/annotations/anno1.json"
    # Route table: collection-level routes first, then wildcard routes for
    # individual annotations.
    routes = [
        ("GET", "", wptserve.handlers.file_handler),
        ("GET", "index.html", wptserve.handlers.file_handler),
        # container/collection responses
        ("HEAD", "annotations/", collection_head),
        ("OPTIONS", "annotations/", collection_options),
        ("GET", "annotations/", collection_get),
        # create annotations in the collection
        ("POST", "annotations/", annotation_post),
        # single annotation responses
        ("HEAD", "annotations/*", annotation_head),
        ("OPTIONS", "annotations/*", annotation_options),
        ("GET", "annotations/*", annotation_get),
        ("PUT", "annotations/*", annotation_put),
        ("DELETE", "annotations/*", annotation_delete)
    ]
    httpd = wptserve.server.WebTestHttpd(host=myhost, bind_hostname=myhost, port=port, doc_root=doc_root,
                                         routes=routes)
    # Serve until interrupted.
    httpd.start(block=True)
|
mpl-2.0
|
zooba/PTVS
|
Python/Product/Miniconda/Miniconda3-x64/Lib/multiprocessing/popen_forkserver.py
|
11
|
2016
|
import io
import os
from .context import reduction, set_spawning_popen
if not reduction.HAVE_SEND_HANDLE:
raise ImportError('No support for sending fds between processes')
from . import forkserver
from . import popen_fork
from . import spawn
from . import util
__all__ = ['Popen']
#
# Wrapper for an fd used while launching a process
#
class _DupFd(object):
def __init__(self, ind):
self.ind = ind
def detach(self):
return forkserver.get_inherited_fds()[self.ind]
#
# Start child process using a server process
#
class Popen(popen_fork.Popen):
    # Start-method name as registered with multiprocessing contexts.
    method = 'forkserver'
    # Fd wrapper used by reduction when pickling this process object.
    DupFd = _DupFd
    def __init__(self, process_obj):
        # fds to share with the child; filled by duplicate_for_child()
        # while the process object is pickled, before _launch sends them.
        self._fds = []
        super().__init__(process_obj)
    def duplicate_for_child(self, fd):
        """Record *fd* for inheritance; return its index in the fd list."""
        self._fds.append(fd)
        return len(self._fds) - 1
    def _launch(self, process_obj):
        # Serialize preparation data and the process object *before*
        # contacting the forkserver so pickling errors surface locally.
        prep_data = spawn.get_preparation_data(process_obj._name)
        buf = io.BytesIO()
        set_spawning_popen(self)
        try:
            reduction.dump(prep_data, buf)
            reduction.dump(process_obj, buf)
        finally:
            set_spawning_popen(None)
        # sentinel: read end used to learn the pid and later the exit code.
        self.sentinel, w = forkserver.connect_to_new_process(self._fds)
        # Close the sentinel when this Popen is garbage collected.
        self.finalizer = util.Finalize(self, os.close, (self.sentinel,))
        with open(w, 'wb', closefd=True) as f:
            f.write(buf.getbuffer())
        # The forkserver replies with the pid of the spawned child.
        self.pid = forkserver.read_signed(self.sentinel)
    def poll(self, flag=os.WNOHANG):
        """Return the child's exit code if it has finished, else None."""
        if self.returncode is None:
            from multiprocessing.connection import wait
            # WNOHANG -> non-blocking poll; otherwise wait indefinitely.
            timeout = 0 if flag == os.WNOHANG else None
            if not wait([self.sentinel], timeout):
                return None
            try:
                self.returncode = forkserver.read_signed(self.sentinel)
            except (OSError, EOFError):
                # This should not happen usually, but perhaps the forkserver
                # process itself got killed
                self.returncode = 255
        return self.returncode
|
apache-2.0
|
Salat-Cx65/python-for-android
|
python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/Python_RSAKey.py
|
239
|
7707
|
"""Pure-Python RSA implementation."""
from cryptomath import *
import xmltools
from ASN1Parser import ASN1Parser
from RSAKey import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def hash(self):
s = self.writeXMLPublicKey('\t\t')
return hashAndBase64(s.strip())
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def write(self, indent=''):
if self.d:
s = indent+'<privateKey xmlns="http://trevp.net/rsa">\n'
else:
s = indent+'<publicKey xmlns="http://trevp.net/rsa">\n'
s += indent+'\t<n>%s</n>\n' % numberToBase64(self.n)
s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e)
if self.d:
s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d)
s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p)
s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q)
s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP)
s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ)
s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv)
s += indent+'</privateKey>'
else:
s += indent+'</publicKey>'
#Only add \n if part of a larger structure
if indent != '':
s += '\n'
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits/2, False)
q = getRandomPrime(bits/2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 3L #Needed to be long, for Java
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a <privateKey> or <publicKey>, or
PEM-encoded key."""
start = s.find("-----BEGIN PRIVATE KEY-----")
if start != -1:
end = s.find("-----END PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parsePKCS8(bytes)
else:
start = s.find("-----BEGIN RSA PRIVATE KEY-----")
if start != -1:
end = s.find("-----END RSA PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parseSSLeay(bytes)
raise SyntaxError("Missing PEM Prefix")
parsePEM = staticmethod(parsePEM)
def parseXML(s):
element = xmltools.parseAndStripWhitespace(s)
return Python_RSAKey._parseXML(element)
parseXML = staticmethod(parseXML)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
def _parseXML(element):
try:
xmltools.checkName(element, "privateKey")
except SyntaxError:
xmltools.checkName(element, "publicKey")
#Parse attributes
xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z")
xmltools.checkNoMoreAttributes(element)
#Parse public values (<n> and <e>)
n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx))
e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx))
d = 0
p = 0
q = 0
dP = 0
dQ = 0
qInv = 0
#Parse private values, if present
if element.childNodes.length>=3:
d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx))
p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx))
q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx))
dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx))
dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx))
qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx))
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseXML = staticmethod(_parseXML)
|
apache-2.0
|
QuickSander/CouchPotatoServer
|
libs/CodernityDB/index.py
|
81
|
4746
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import marshal
import struct
import shutil
from CodernityDB.storage import IU_Storage, DummyStorage
try:
from CodernityDB import __version__
except ImportError:
from __init__ import __version__
import io
class IndexException(Exception):
    """Base class for every index-related error."""
    pass
class IndexNotFoundException(IndexException):
    """Raised when a named index does not exist."""
    pass
class ReindexException(IndexException):
    """Raised when an index must be rebuilt."""
    pass
class TryReindexException(ReindexException):
    """Raised when a reindex attempt may fix the problem."""
    pass
class ElemNotFound(IndexException):
    """Raised when a looked-up element is absent from the index."""
    pass
class DocIdNotFound(ElemNotFound):
    """Raised when a document id is absent from the index."""
    pass
class IndexConflict(IndexException):
    """Raised on conflicting index definitions or operations."""
    pass
class IndexPreconditionsException(IndexException):
    """Raised when an index's preconditions are not met."""
    pass
class Index(object):
    """Abstract base for a single CodernityDB index (Python 2 code).

    An index persists its bucket structure in ``<db_path>/<name>_buck``;
    the first ``_start_ind`` bytes of that file hold a marshalled dict of
    index properties (see :meth:`_fix_params` / :meth:`_save_params`).
    Subclasses implement the key/value operations.
    """

    __version__ = __version__

    custom_header = ""  # : use it for imports required by your index

    def __init__(self,
                 db_path,
                 name):
        self.name = name
        # Bytes reserved at the head of the bucket file for the props dict.
        self._start_ind = 500
        self.db_path = db_path

    def open_index(self):
        """Open an existing bucket file, load props and open storage.

        :raises IndexException: if the bucket file does not exist.
        """
        if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')):
            raise IndexException("Doesn't exists")
        self.buckets = io.open(
            os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0)
        self._fix_params()
        self._open_storage()

    def _close(self):
        self.buckets.close()
        self.storage.close()

    def close_index(self):
        """Flush, fsync and close the bucket file and its storage."""
        self.flush()
        self.fsync()
        self._close()

    def create_index(self):
        raise NotImplementedError()

    def _fix_params(self):
        """Load the marshalled props dict into instance attributes."""
        self.buckets.seek(0)
        props = marshal.loads(self.buckets.read(self._start_ind))
        for k, v in props.iteritems():
            self.__dict__[k] = v
        # Leave the file positioned at its end for subsequent appends.
        self.buckets.seek(0, 2)

    def _save_params(self, in_params=None):
        """Merge ``in_params`` into the stored props block and rewrite it.

        :raises IndexException: if the marshalled props no longer fit in
            the ``_start_ind`` bytes reserved for them.
        """
        # BUG FIX: replaced the mutable default argument ``in_params={}``.
        if in_params is None:
            in_params = {}
        self.buckets.seek(0)
        props = marshal.loads(self.buckets.read(self._start_ind))
        props.update(in_params)
        self.buckets.seek(0)
        data = marshal.dumps(props)
        if len(data) > self._start_ind:
            raise IndexException("Too big props")  # BUG FIX: typo "To big"
        self.buckets.write(data)
        self.flush()
        self.buckets.seek(0, 2)
        self.__dict__.update(props)

    def _open_storage(self, *args, **kwargs):
        pass

    def _create_storage(self, *args, **kwargs):
        pass

    def _destroy_storage(self, *args, **kwargs):
        self.storage.destroy()

    def _find_key(self, key):
        raise NotImplementedError()

    def update(self, doc_id, key, start, size):
        raise NotImplementedError()

    def insert(self, doc_id, key, start, size):
        raise NotImplementedError()

    def get(self, key):
        raise NotImplementedError()

    def get_many(self, key, start_from=None, limit=0):
        raise NotImplementedError()

    def all(self, start_pos):
        raise NotImplementedError()

    def delete(self, key, start, size):
        raise NotImplementedError()

    def make_key_value(self, data):
        raise NotImplementedError()

    def make_key(self, data):
        raise NotImplementedError()

    def compact(self, *args, **kwargs):
        raise NotImplementedError()

    def destroy(self, *args, **kwargs):
        """Close and remove the bucket file and the backing storage."""
        self._close()
        bucket_file = os.path.join(self.db_path, self.name + '_buck')
        os.unlink(bucket_file)
        self._destroy_storage()
        # NOTE(review): subclasses appear to replace _find_key with a
        # memoized callable exposing .clear() -- confirm before changing.
        self._find_key.clear()

    def flush(self):
        # Best effort: buckets/storage may not be open yet.
        try:
            self.buckets.flush()
            self.storage.flush()
        except Exception:  # BUG FIX: bare except also caught SystemExit
            pass

    def fsync(self):
        # Best effort: see flush().
        try:
            os.fsync(self.buckets.fileno())
            self.storage.fsync()
        except Exception:  # BUG FIX: bare except also caught SystemExit
            pass

    def update_with_storage(self, doc_id, key, value):
        """Store ``value`` in storage (if truthy) then update the index."""
        if value:
            start, size = self.storage.insert(value)
        else:
            # Empty values occupy no storage; a sentinel position is kept.
            start = 1
            size = 0
        return self.update(doc_id, key, start, size)

    def insert_with_storage(self, doc_id, key, value):
        """Store ``value`` in storage (if truthy) then insert the entry."""
        if value:
            start, size = self.storage.insert(value)
        else:
            start = 1
            size = 0
        return self.insert(doc_id, key, start, size)
|
gpl-3.0
|
Arcanemagus/plexpy
|
lib/cloudinary/search.py
|
3
|
1732
|
import json
from copy import deepcopy
from . import api
class Search:
    """Fluent builder for a Cloudinary search request.

    Every setter mutates the underlying query dict and returns ``self``
    so calls can be chained.
    """

    def __init__(self):
        self.query = {}

    def expression(self, value):
        """Specify the search query expression."""
        self.query["expression"] = value
        return self

    def max_results(self, value):
        """Limit how many results the API may return."""
        self.query["max_results"] = value
        return self

    def next_cursor(self, value):
        """Continue a previous search from its ``next_cursor`` value."""
        self.query["next_cursor"] = value
        return self

    def sort_by(self, field_name, direction=None):
        """Append a sort field; the direction defaults to ``desc``."""
        if direction is None:
            direction = 'desc'
        return self._add("sort_by", {field_name: direction})

    def aggregate(self, value):
        """Append an aggregation field."""
        return self._add("aggregate", value)

    def with_field(self, value):
        """Request one additional field in the result set."""
        return self._add("with_field", value)

    def to_json(self):
        """Serialize the current query as a JSON string."""
        return json.dumps(self.query)

    def execute(self, **options):
        """POST the query to the search endpoint and return the response."""
        options["content_type"] = 'application/json'
        endpoint = ['resources', 'search']
        return api.call_json_api('post', endpoint, self.as_dict(), **options)

    def _add(self, name, value):
        # List-valued parameters accumulate across repeated calls.
        self.query.setdefault(name, []).append(value)
        return self

    def as_dict(self):
        """Return an independent deep copy of the query."""
        return deepcopy(self.query)
|
gpl-3.0
|
mediatum/mediatum-docs
|
docs/source/conf.py
|
1
|
10337
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mediatum documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 31 15:26:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Substitutions prepended to every reST source file (|mdT|, |mdTurl|, ...).
rst_prolog = """
.. |mdT| replace:: mediaTUM
.. |mdTurl| replace:: https://mediatum.ub.tum.de
.. |mdt| replace:: Mediatum
.. |mdturl| replace:: http://mediatum.github.io
"""
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.ifconfig',  # conditional content via the ifconfig directive
    'sphinx.ext.viewcode',  # link documented objects to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
#project = 'mediaTUM'
project = 'mediaTUM'
copyright = '2016, mediaTUM authors'
author = 'mediaTUM authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v2017'
# The full version, including alpha/beta/rc tags.
# NOTE: version and release are kept identical (yearly release naming).
release = 'v2017'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
#locale_dirs = ['locale/'] # path is example but recommended.
#gettext_compact = False # optional.
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Empty: every .rst file under the source directory is built.)
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { # for alabaster
    'github_user': 'mediatum',
    'github_repo': 'mediatum',
    'show_powered_by': False,
    'sidebar_width': '16em',
    'page_width': '70em',
    'fixed_sidebar': True, # only in ver 0.7.8 (May 2016)
    'extra_nav_links': {'mediatum.github.io': 'http://mediatum.github.io',
                        # BUG FIX: URL was 'https:/github.com/...' (missing
                        # second slash), producing a broken sidebar link.
                        'github.com/mediatum': 'https://github.com/mediatum',
                        'Impressum': 'https://www.ub.tum.de/impressum'},
    'font_family': 'Arial, Helvetica, sans-serif',
    'head_font_family': 'Arial, Helvetica, sans-serif',
    'caption_font_size': '85%',
    'font_size': '120%'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# The '**' glob applies this sidebar set to every page.
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# (Disabled here: no "Page source" links in the rendered pages.)
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'mediatumdoc'
# -- Options for LaTeX output ---------------------------------------------
# All LaTeX defaults accepted; uncomment entries below to override.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# A single manual-class document built from the master toctree.
latex_documents = [
    (master_doc, 'mediatum.tex', 'mediaTUM Documentation',
     'mediaTUM authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Section 1 (user commands) man page built from the master toctree.
man_pages = [
    (master_doc, 'mediaTUM', 'mediaTUM Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description field still holds the sphinx-quickstart
# placeholder text ("One line description of project.").
texinfo_documents = [
    (master_doc, 'mediaTUM', 'mediaTUM Documentation',
     author, 'mediaTUM Team', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
gpl-3.0
|
ajenhl/eats
|
server/eats/models/entity_type_property_assertion.py
|
1
|
2595
|
from tmapi.models import Association
from base_manager import BaseManager
from entity_type import EntityType
from property_assertion import PropertyAssertion
class EntityTypePropertyAssertionManager (BaseManager):

    """Manager restricting querysets to entity-type property assertions."""

    def filter_by_authority_entity_type (self, authority, entity_type):
        """Return assertions made by `authority` asserting `entity_type`."""
        topic_map = self.eats_topic_map
        scoped = self.filter(scope=authority)
        return scoped.filter(roles__player=entity_type,
                             roles__type=topic_map.property_role_type)

    def filter_by_entity (self, entity):
        """Return assertions whose entity role is played by `entity`."""
        topic_map = self.eats_topic_map
        return self.filter(roles__player=entity,
                           roles__type=topic_map.entity_role_type)

    def get_queryset (self):
        """Limit the base queryset to entity-type assertion associations."""
        base = super(EntityTypePropertyAssertionManager, self).get_queryset()
        return base.filter(type=self.eats_topic_map.entity_type_assertion_type)
class EntityTypePropertyAssertion (Association, PropertyAssertion):
    objects = EntityTypePropertyAssertionManager()
    class Meta:
        # Proxy model: no new table, just behaviour over Association rows.
        proxy = True
        app_label = 'eats'
    @property
    def entity_type (self):
        """Returns the entity type being asserted.

        :rtype: `EntityType`
        """
        # Cache the looked-up player on the instance; the first property
        # role is assumed to hold the asserted entity type.
        if not hasattr(self, '_entity_type'):
            property_role = self.get_roles(
                self.eats_topic_map.property_role_type)[0]
            self._entity_type = property_role.get_player(proxy=EntityType)
        return self._entity_type
    def set_players (self, entity, entity_type):
        """Sets the entity and entity type involved in this property
        assertion.  May be called only once per assertion.

        :param entity: the entity
        :type entity: `Entity`
        :param entity_type: the entity type
        :type entity_type: `Topic`
        """
        # Guard against double initialisation: both roles are created here.
        if hasattr(self, '_entity') or hasattr(self, '_entity_type'):
            raise Exception(
                'set_players may be called only once for a property assertion')
        self.create_role(self.eats_topic_map.property_role_type, entity_type)
        self._entity_type = entity_type
        self.create_role(self.eats_topic_map.entity_role_type, entity)
        self._entity = entity
    def update (self, entity_type):
        """Updates this property assertion.

        :param entity_type: entity type
        :type entity_type: `Topic`
        """
        # Only touch the topic map when the entity type actually changes;
        # the authority validates the new type before it is stored.
        if entity_type != self.entity_type:
            self.authority.validate_components(entity_type=entity_type)
            property_role = self.get_roles(
                self.eats_topic_map.property_role_type)[0]
            property_role.set_player(entity_type)
            self._entity_type = entity_type
|
gpl-3.0
|
chase-seibert/flask-admin
|
flask_admin/contrib/sqla/fields.py
|
25
|
8740
|
"""
Useful form fields for use with SQLAlchemy ORM.
"""
import operator
from wtforms import widgets
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
from .tools import get_primary_key
from flask_admin._compat import text_type, string_types
from flask_admin.form import FormOpts
from flask_admin.model.fields import InlineFieldList, InlineModelFormField
from flask_admin.model.widgets import InlineFormWidget
# sqlalchemy.orm.util.identity_key backs the default `get_pk`
# implementation; record its availability so QuerySelectField can raise a
# clear error later instead of failing at import time.
try:
    from sqlalchemy.orm.util import identity_key
    has_identity_key = True
except ImportError:
    has_identity_key = False
class QuerySelectField(SelectFieldBase):
    """
    Select field whose choices are the results of a SQLAlchemy ``Query``.

    ``data`` holds an ORM model instance rather than its primary key, and
    submitting a value that is not present in the query raises a
    validation error.

    Primary keys must have a stable, comparable string representation
    (string / unicode / integer columns, in practice). They are detected
    automatically from the model; alternatively pass a one-argument
    callable as `get_pk` returning a unique comparable key.

    The query may be assigned per-instance via the `query` attribute from
    a view; when it is unset, the `query_factory` callable given to the
    constructor supplies it.

    `get_label` controls the option labels: a string names a model
    attribute, a one-argument callable receives the instance and returns
    the text, and `None` falls back to the instance's own string
    conversion.

    With `allow_blank=True` a blank choice (labelled `blank_text`) is
    prepended; selecting it makes ``data`` `None`.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, query_factory=None,
                 get_pk=None, get_label=None, allow_blank=False,
                 blank_text=u'', **kwargs):
        super(QuerySelectField, self).__init__(label, validators, **kwargs)
        self.query_factory = query_factory

        # Primary-key extractor: explicit callable wins, otherwise fall
        # back to SQLAlchemy's identity_key (when importable).
        if get_pk is not None:
            self.get_pk = get_pk
        elif has_identity_key:
            self.get_pk = get_pk_from_identity
        else:
            raise Exception(u'The sqlalchemy identity_key function could not be imported.')

        # Label extractor: identity, attribute getter, or user callable.
        if get_label is None:
            self.get_label = lambda model: model
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self.query = None
        self._object_list = None

    def _get_data(self):
        # Lazily resolve the raw submitted pk into a model instance.
        if self._formdata is not None:
            for key, model in self._get_object_list():
                if key == self._formdata:
                    self._set_data(model)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def _get_object_list(self):
        # Execute the query once per request and cache (pk-string, obj)
        # pairs for choice rendering and validation.
        if self._object_list is None:
            source = self.query or self.query_factory()
            pk_of = self.get_pk
            self._object_list = [
                (text_type(pk_of(model)), model) for model in source
            ]
        return self._object_list

    def iter_choices(self):
        if self.allow_blank:
            yield (u'__None', self.blank_text, self.data is None)
        for key, model in self._get_object_list():
            yield (key, self.get_label(model), model == self.data)

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        if self.allow_blank and valuelist[0] == u'__None':
            self.data = None
        else:
            # Defer resolution to _get_data (the query may not be bound yet).
            self._data = None
            self._formdata = valuelist[0]

    def pre_validate(self, form):
        if self.allow_blank and self.data is None:
            return
        if not any(self.data == model for _, model in self._get_object_list()):
            raise ValidationError(self.gettext(u'Not a valid choice'))
class QuerySelectMultipleField(QuerySelectField):
    """
    Very similar to QuerySelectField with the difference that this will
    display a multiple select. The data property will hold a list with ORM
    model instances and will be an empty list when no value is selected.

    If any of the items in the data list or submitted form data cannot be
    found in the query, this will result in a validation error.
    """
    widget = widgets.Select(multiple=True)
    def __init__(self, label=None, validators=None, default=None, **kwargs):
        # Build a fresh list per instance to avoid the shared
        # mutable-default-argument pitfall.
        if default is None:
            default = []
        super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs)
        # Set when submitted form data contains a pk absent from the query.
        self._invalid_formdata = False
    def _get_data(self):
        # Resolve the raw submitted pk set (built in process_formdata) into
        # model instances the first time data is read.
        formdata = self._formdata
        if formdata is not None:
            data = []
            for pk, obj in self._get_object_list():
                if not formdata:
                    break
                elif pk in formdata:
                    # Consume each matched pk; any leftovers afterwards mean
                    # the submission referenced unknown objects.
                    formdata.remove(pk)
                    data.append(obj)
            if formdata:
                self._invalid_formdata = True
            self._set_data(data)
        return self._data
    def _set_data(self, data):
        self._data = data
        self._formdata = None
    data = property(_get_data, _set_data)
    def iter_choices(self):
        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj in self.data)
    def process_formdata(self, valuelist):
        # Store raw pks only; resolution to objects is deferred to _get_data.
        self._formdata = set(valuelist)
    def pre_validate(self, form):
        if self._invalid_formdata:
            raise ValidationError(self.gettext(u'Not a valid choice'))
        elif self.data:
            # Every selected object must still be present in the query.
            obj_list = list(x[1] for x in self._get_object_list())
            for v in self.data:
                if v not in obj_list:
                    raise ValidationError(self.gettext(u'Not a valid choice'))
class InlineModelFormList(InlineFieldList):
    """
    Customized inline model form list field.
    """
    form_field_type = InlineModelFormField
    """
    Form field type. Override to use custom field for each inline form
    """
    def __init__(self, form, session, model, prop, inline_view, **kwargs):
        """
        Default constructor.

        :param form:
            Form for the related model
        :param session:
            SQLAlchemy session
        :param model:
            Related model
        :param prop:
            Related property name
        :param inline_view:
            Inline view
        """
        self.form = form
        self.session = session
        self.model = model
        self.prop = prop
        self.inline_view = inline_view
        # Name of the related model's primary-key attribute, used to match
        # submitted inline forms to existing rows in populate_obj().
        self._pk = get_primary_key(model)
        # Generate inline form field
        form_opts = FormOpts(widget_args=getattr(inline_view, 'form_widget_args', None),
                             form_rules=inline_view._form_rules)
        form_field = self.form_field_type(form, self._pk, form_opts=form_opts)
        super(InlineModelFormList, self).__init__(form_field, **kwargs)
    def display_row_controls(self, field):
        # Only rows that are already persisted (i.e. have a primary key)
        # show row controls.
        return field.get_pk() is not None
    def populate_obj(self, obj, name):
        # Write submitted inline forms back onto obj's related collection,
        # creating, updating or deleting rows as required.
        values = getattr(obj, name, None)
        if values is None:
            return
        # Create primary key map
        pk_map = dict((str(getattr(v, self._pk)), v) for v in values)
        # Handle request data
        for field in self.entries:
            field_id = field.get_pk()
            if field_id in pk_map:
                model = pk_map[field_id]
                if self.should_delete(field):
                    # Delete through the session so SQLAlchemy cascades apply.
                    self.session.delete(model)
                    continue
            else:
                # Unknown pk: treat the entry as a brand-new row.
                model = self.model()
                values.append(model)
            field.populate_obj(model, None)
            # Give the inline view a chance to post-process the model.
            self.inline_view.on_model_change(field, model)
def get_pk_from_identity(obj):
    """Build a string primary key for *obj* from its SQLAlchemy identity key."""
    # TODO: Remove me
    _, key = identity_key(instance=obj)
    return u':'.join(map(text_type, key))
|
bsd-3-clause
|
stonebig/bokeh
|
bokeh/protocol/__init__.py
|
2
|
3715
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Implement and provide message protocols for communication between Bokeh
Servers and clients.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from tornado.escape import json_decode
# Bokeh imports
from . import messages
from . import versions
from .exceptions import ProtocolError
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Protocol',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Protocol(object):
    ''' Message factory bound to one version of the Bokeh Server message
    protocol.

    Args:
        version (str) : a string identifying a protocol version, e.g. "1.0"

    '''
    def __init__(self, version):
        if version not in versions.spec:
            raise ProtocolError("Unknown protocol version %r" % version)
        self._version = version
        # Map each message type to its implementation at the revision
        # pinned by this protocol version.
        self._messages = {
            msgtype: messages.index[(msgtype, revision)]
            for msgtype, revision in versions.spec[version]
        }

    def __repr__(self):
        return "Protocol(%r)" % self.version

    def create(self, msgtype, *args, **kwargs):
        ''' Build a new Message instance of the given type.

        Args:
            msgtype (str) :

        '''
        if msgtype not in self._messages:
            raise ProtocolError("Unknown message type %r for protocol version %s" % (msgtype, self._version))
        return self._messages[msgtype].create(*args, **kwargs)

    def assemble(self, header_json, metadata_json, content_json):
        ''' Reconstruct a Message instance from its JSON fragments.

        Args:
            header_json (``JSON``) :

            metadata_json (``JSON``) :

            content_json (``JSON``) :

        Returns:
            message

        '''
        header = json_decode(header_json)
        if 'msgtype' not in header:
            log.error("Bad header with no msgtype was: %r", header)
            raise ProtocolError("No 'msgtype' in header")
        factory = self._messages[header['msgtype']]
        return factory.assemble(header_json, metadata_json, content_json)

    @property
    def version(self):
        return self._version
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bsd-3-clause
|
gussan/kyototycoon
|
example/ktrestex.py
|
12
|
1597
|
import time
import urllib
import http.client
# RESTful interface of Kyoto Tycoon
class KyotoTycoon:
    """Thin client for Kyoto Tycoon's RESTful (HTTP) interface.

    Keys and values may be ``str`` (encoded as UTF-8) or ``bytes``.
    """

    def open(self, host="127.0.0.1", port=1978, timeout=30):
        """Open an HTTP connection to the server."""
        self.ua = http.client.HTTPConnection(host, port, False, timeout)

    def close(self):
        """Shut down the connection."""
        self.ua.close()

    def set(self, key, value, xt=None):
        """Store a record; *xt* is a relative expiration time in seconds.

        Returns True when the server created the record (HTTP 201).
        """
        if isinstance(key, str):
            key = key.encode("UTF-8")
        if isinstance(value, str):
            value = value.encode("UTF-8")
        path = "/" + urllib.parse.quote(key)
        headers = {}
        if xt is not None:
            # The server expects an absolute expiration timestamp.
            headers["X-Kt-Xt"] = str(int(time.time()) + xt)
        self.ua.request("PUT", path, value, headers)
        response = self.ua.getresponse()
        response.read()  # drain the body so the connection can be reused
        return response.status == 201

    def remove(self, key):
        """Delete a record; returns True on success (HTTP 204)."""
        if isinstance(key, str):
            key = key.encode("UTF-8")
        path = "/" + urllib.parse.quote(key)
        self.ua.request("DELETE", path)
        response = self.ua.getresponse()
        response.read()
        return response.status == 204

    def get(self, key):
        """Fetch the value of a record, or None if it does not exist."""
        if isinstance(key, str):
            key = key.encode("UTF-8")
        path = "/" + urllib.parse.quote(key)
        self.ua.request("GET", path)
        response = self.ua.getresponse()
        body = response.read()
        return body if response.status == 200 else None
# sample usage: store a record with a 60-second expiry, read it back,
# then delete it (requires a Kyoto Tycoon server on localhost:1978).
kt = KyotoTycoon()
kt.open("localhost", 1978)
kt.set("japan", "tokyo", 60)
print(kt.get("japan"))
kt.remove("japan")
kt.close()
|
gpl-3.0
|
sebrandon1/bitcoin
|
test/functional/test_runner.py
|
1
|
24076
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
# Formatting. Default colors to empty strings.
# Each pair is (reset-sequence, set-sequence); index [1] turns the style
# on and [0] turns it off again.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
except UnicodeDecodeError:
    # Fall back to ASCII markers when stdout cannot represent the glyphs.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    BLUE = ('\033[0m', '\033[0;34m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
# Exit codes test scripts use to signal success / intentional skip.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
# 20 minutes represented in seconds
TRAVIS_TIMEOUT_DURATION = 20 * 60
BASE_SCRIPTS = [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'wallet_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_bitcoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'mempool_accept.py',
'wallet_import_rescan.py',
'rpc_bind.py --ipv4',
'rpc_bind.py --ipv6',
'rpc_bind.py --nonloopback',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'wallet_fallbackfee.py',
'feature_minchainwork.py',
'rpc_getblockstats.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_includeconf.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_blocksdir.py',
'feature_config_args.py',
'feature_help.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'p2p_feefilter.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
    """Parse command-line options, build the list of test scripts to run,
    and hand it to run_tests().

    Unrecognised arguments starting with ``--`` are forwarded to every
    test script; the remaining unrecognised arguments name the tests to
    run.
    """
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False,
                                     usage='%(prog)s [test_runner.py options] [script options] [scripts]',
                                     description=__doc__,
                                     epilog='''
Help text and arguments for individual test script:''',
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
    parser.add_argument('--failfast', action='store_true', help='stop execution after the first test failure')
    args, unknown_args = parser.parse_known_args()

    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]

    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    # Use a context manager so the config file handle is closed (the
    # previous bare open() leaked it).
    with open(configfile) as conf_handle:
        config.read_file(conf_handle)

    passon_args.append("--configfile=%s" % configfile)

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = "%s/bitcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print("No functional tests to run. Wallet, utils, and bitcoind must all be enabled")
        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
        sys.exit(0)

    # Build list of tests
    test_list = []
    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
        # NOTE: raw string — the dot must be escaped to match a literal '.'.
        tests = [re.sub(r"\.py$", "", test) + ".py" for test in tests]
        for test in tests:
            if test in ALL_SCRIPTS:
                test_list.append(test)
            else:
                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
    elif args.extended:
        # Include extended tests
        test_list += ALL_SCRIPTS
    else:
        # Run base tests only
        test_list += BASE_SCRIPTS

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        exclude_tests = [re.sub(r"\.py$", "", test) + ".py" for test in args.exclude.split(',')]
        for exclude_test in exclude_tests:
            if exclude_test in test_list:
                test_list.remove(exclude_test)
            else:
                print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))

    if not test_list:
        print("No valid test scripts specified. Check that your test is in one "
              "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
        parser.print_help()
        subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
        sys.exit(0)

    check_script_list(config["environment"]["SRCDIR"])
    check_script_prefixes()

    if not args.keepcache:
        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)

    run_tests(
        test_list,
        config["environment"]["SRCDIR"],
        config["environment"]["BUILDDIR"],
        tmpdir,
        jobs=args.jobs,
        enable_coverage=args.coverage,
        args=passon_args,
        combined_logs_len=args.combinedlogslen,
        failfast=args.failfast,
    )
def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
    """Run each script in test_list (up to `jobs` in parallel) and report.

    :param test_list: test script names, possibly with extra arguments
    :param src_dir: source tree root (scripts live under test/functional/)
    :param build_dir: build tree root (holds the test cache directory)
    :param tmpdir: base directory for per-test data directories
    :param jobs: number of scripts to run in parallel
    :param enable_coverage: collect and report RPC coverage data
    :param args: extra flags passed through to every test script
    :param combined_logs_len: if non-zero, print that many lines of
        combined node/framework logs when a test fails
    :param failfast: stop after the first test failure

    Exits the process with a non-zero status if any test failed.
    """
    args = args or []

    # Warn if bitcoind is already running (unix only)
    try:
        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # "pidof" missing or no process found — nothing to warn about.
        pass

    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))

    tests_dir = src_dir + '/test/functional/'

    flags = ['--cachedir={}'.format(cache_dir)] + args

    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None

    if len(test_list) > 1 and jobs > 1:
        # Populate cache once up front so parallel tests don't race to build it.
        try:
            subprocess.check_output([sys.executable, tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            sys.stdout.buffer.write(e.output)
            raise

    #Run Tests
    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
    start_time = time.time()
    test_results = []

    max_len_name = len(max(test_list, key=len))

    for _ in range(len(test_list)):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)

        if test_result.status == "Passed":
            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
        else:
            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))

            if failfast:
                logging.debug("Early exiting after test failure")
                break

    print_results(test_results, max_len_name, (int(time.time() - start_time)))

    if coverage:
        coverage.report_rpc_coverage()

        logging.debug("Cleaning up coverage data")
        coverage.cleanup()

    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)

    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))

    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()

    sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Print a formatted summary table of all test results."""
    parts = [
        "\n",
        BOLD[1],
        "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION"),
        BOLD[0],
    ]
    test_results.sort(key=TestResult.sort_key)
    all_passed = True
    total_time = 0
    for result in test_results:
        all_passed = all_passed and result.was_successful
        total_time += result.time
        result.padding = max_len_name
        parts.append(str(result))
    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        parts.append(RED[1])
    parts.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), total_time) + BOLD[0])
    if not all_passed:
        parts.append(RED[0])
    parts.append("Runtime: %s s\n" % (runtime))
    print("".join(parts))
class TestHandler:
    """
    Trigger the test scripts passed in via the list.
    """

    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)

        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie bitcoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        self.jobs = []

    def get_next(self):
        """Start jobs up to the parallelism limit, then block until one
        finishes and return (TestResult, testdir, stdout, stderr)."""
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            portseed = len(self.test_list) + self.portseed_offset
            portseed_arg = ["--portseed={}".format(portseed)]
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            # BUGFIX: use a raw string so the regex strips a literal ".py"
            # suffix — the previous ".py$" let the dot match any character.
            testdir = "{}/{}_{}".format(self.tmpdir, re.sub(r"\.py$", "", test_argv[0]), portseed)
            tmpdir_arg = ["--tmpdir={}".format(testdir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                               universal_newlines=True,
                                               stdout=log_stdout,
                                               stderr=log_stderr),
                              testdir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, testdir, log_out, log_err) = job
                if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
                    # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)

                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
            print('.', end='', flush=True)

    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        procs = [i[2] for i in self.jobs]

        for proc in procs:
            proc.kill()

        for proc in procs:
            proc.wait()
class TestResult():
    """Outcome of a single test script run (name, status, duration)."""

    # Sort order for the results table: passed first, then skipped, then failed.
    _STATUS_ORDER = {"Passed": 0, "Skipped": 1, "Failed": 2}
    # Display attributes (color pair, status glyph) per status.
    _STATUS_STYLE = {"Passed": 0, "Failed": 1, "Skipped": 2}

    def __init__(self, name, status, time):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0

    def sort_key(self):
        rank = self._STATUS_ORDER.get(self.status)
        if rank is None:
            return None
        return rank, self.name.lower()

    def __repr__(self):
        if self.status == "Passed":
            color, glyph = BLUE, TICK
        elif self.status == "Failed":
            color, glyph = RED, CROSS
        elif self.status == "Skipped":
            color, glyph = GREY, CIRCLE
        row = "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time)
        return color[1] + row + color[0]

    @property
    def was_successful(self):
        return self.status != "Failed"
def check_script_prefixes():
    """Check that test scripts start with one of the allowed name prefixes."""
    allowed = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    offenders = sorted(script for script in ALL_SCRIPTS if allowed.match(script) is None)
    if offenders:
        print("%sERROR:%s %d tests not meeting naming conventions:" % (BOLD[1], BOLD[0], len(offenders)))
        print(" %s" % ("\n ".join(offenders)))
        raise AssertionError("Some tests are not following naming convention!")
def check_script_list(src_dir):
    """Check scripts directory.

    Warn about .py files in the functional tests directory that are not
    listed in any test list; on Travis this is treated as an error."""
    script_dir = src_dir + '/test/functional/'
    python_files = {name for name in os.listdir(script_dir) if name.endswith(".py")}
    known_files = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
    missed_tests = list(python_files - known_files)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
        if os.getenv('TRAVIS') == 'true':
            # On travis this warning is an error to prevent merging incomplete commits into master
            sys.exit(1)
class RPCCoverage():
    """
    Coverage reporting utilities for test_runner.

    Each test script subprocess writes coverage files (listing the RPC
    commands it invoked) into a shared directory, which also contains the
    complete command list per `bitcoin-cli help` (`rpc_interface.txt`).
    After the run, the union of invoked commands is diffed against the
    complete list to find uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % command) for command in sorted(uncovered)))

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        reference_path = os.path.join(self.dir, reference_filename)

        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")

        with open(reference_path, 'r') as reference_file:
            all_cmds = {line.strip() for line in reference_file}

        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if not filename.startswith(coverage_file_prefix):
                    continue
                with open(os.path.join(root, filename), 'r') as coverage_file:
                    covered_cmds.update(line.strip() for line in coverage_file)

        return all_cmds - covered_cmds
# Script entry point.
if __name__ == '__main__':
    main()
|
mit
|
svanschalkwyk/datafari
|
windows/python/Tools/versioncheck/pyversioncheck.py
|
98
|
4051
|
"""pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package
# Test directory
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
    """Check *package* against the version file(s) at *url* and print a report.

    Returns the status from checkonly(): -1 when no correctly formatted
    version file was found, 1 when up-to-date, 0 when a newer version exists.
    """
    ok, newversion, fp = checkonly(package, url, version, verbose)
    if verbose > VERBOSE_NORMAL:
        # Per-URL reporting already happened inside checkonly()/_check1version.
        return ok
    if ok < 0:
        print '%s: No correctly formatted current version file found'%(package)
    elif ok == 1:
        print '%s: up-to-date (version %s)'%(package, version)
    else:
        print '%s: version %s installed, version %s found:' % \
            (package, version, newversion)
        if verbose > VERBOSE_SILENT:
            # Echo the remainder of the version file (release notes),
            # indented one tab.
            while 1:
                line = fp.readline()
                if not line: break
                sys.stdout.write('\t'+line)
    return ok
def checkonly(package, url, version, verbose=0):
if verbose >= VERBOSE_EACHFILE:
print '%s:'%package
if type(url) == types.StringType:
ok, newversion, fp = _check1version(package, url, version, verbose)
else:
for u in url:
ok, newversion, fp = _check1version(package, u, version, verbose)
if ok >= 0 and verbose < VERBOSE_CHECKALL:
break
return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
    """Check a single version-file URL.

    Returns a tuple (status, newversion, fp):
      status: -1 unreachable / no Current-Version header, 1 identical,
              0 different (newer version available)
      newversion: the remote version string (or the local one when identical)
      fp: the open file object positioned after the headers, or None on error
    """
    if verbose >= VERBOSE_EACHFILE:
        print ' Checking %s'%url
    try:
        fp = urllib.urlopen(url)
    except IOError, arg:
        if verbose >= VERBOSE_EACHFILE:
            print ' Cannot open:', arg
        return -1, None, None
    # The version file is expected to look like an RFC-822 message with a
    # "Current-Version:" header followed by an optional free-form body.
    msg = rfc822.Message(fp, seekable=0)
    newversion = msg.getheader('current-version')
    if not newversion:
        if verbose >= VERBOSE_EACHFILE:
            print ' No "Current-Version:" header in URL or URL not found'
        return -1, None, None
    # Compare case-insensitively, ignoring surrounding whitespace.
    version = version.lower().strip()
    newversion = newversion.lower().strip()
    if version == newversion:
        if verbose >= VERBOSE_EACHFILE:
            print ' Version identical (%s)'%newversion
        return 1, version, fp
    else:
        if verbose >= VERBOSE_EACHFILE:
            print ' Versions different (installed: %s, new: %s)'% \
                (version, newversion)
        return 0, newversion, fp
def _test():
    """Exercise versioncheck() against the test directory at two verbosity
    levels: identical version, newer version, missing file, and a URL list
    whose first entry does not exist."""
    print '--- TEST VERBOSE=1'
    print '--- Testing existing and identical version file'
    versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
    print '--- Testing existing package with new version'
    versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
    print '--- Testing package with non-existing version file'
    versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
    print '--- Test package with 2 locations, first non-existing second ok'
    versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
    versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
    print '--- TEST VERBOSE=2'
    print '--- Testing existing and identical version file'
    versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
    print '--- Testing existing package with new version'
    versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
    print '--- Testing package with non-existing version file'
    versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
    print '--- Test package with 2 locations, first non-existing second ok'
    versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
    versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)
# Run the self-test when executed directly (requires network access).
if __name__ == '__main__':
    _test()
|
apache-2.0
|
ttm/percolationlegacy
|
percolation/sparql/classes.py
|
1
|
17704
|
__doc__="""
NOTES:
See https://rdflib.readthedocs.org/en/4.2.1/_modules/rdflib/plugins/stores/sparqlstore.html
where SparQLClient + SparQLQuery is called a "sparql store"
INSERTs and DELETEs without a WHERE clause have the DATA keyword: INSERT DATA { ... } DELETE DATA { ... }.
DELETE INSERT WHERE queries can't swap to INSERT DELETE WHERE. (The DELETE WHERE is in fact a D I W query without I)
Even so, I cant get this query to work: DELETE { GRAPH <http://purl.org/socialparticipation/po/AuxGraph#1> { ?s ?p ?o . } } INSERT { GRAPH <urn:x-arq:DefaultGraph> { ?s ?p ?o . } } WHERE { GRAPH <http://purl.org/socialparticipation/po/AuxGraph#1> { ?s <http://purl.org/socialparticipation/po/snapshot> <http://purl.org/socialparticipation/po/Snapshot#GeorgeSanders08032014_fb> . } } '
URI: tdb:unionDefaultGraph the union graph (don't seem to work now, maybe bugged)
URI: urn:x-arq:DefaultGraph the default graph (seem to work)
Legacy have had to split delete insert where query in 2: a insert where and delete where because of the bug above.
a reference query that was dropped
insert=(
("?i1",NS.po.snapshot,snapshot),
("_:mblank",a,NS.po.ParticipantAttributes),
("_:mblank",NS.po.participant,"?i1"),
("_:mblank","?p","?o"),
("_:mblank",NS.po.snapshot,snapshot),
)
where=(
("?i1",a,NS.po.Participant),
("?i1","?p","?o"),
)
querystring=P.sparql.functions.buildQuery(triples1=insert,graph1=self.graphidAUX,
#triples2=where,graph2=self.graphidAUX,modifier2=" MINUS {?i1 a ?foovar} "
triples2=where,graph2=self.graphidAUX,modifier2=" FILTER(?p!=<%s>) "%(a,),
method="insert_where")
in favor of a variation of:
DELETE { ?s ?p ?o }
INSERT { ?s1 ?p ?o }
WHERE
{
{ SELECT (uri(concat("http://another.domain.org/",
SUBSTR(str(?s),24)) )
AS ?s1)
{
?s ?p ?o .
FILTER regex(str(?s), "^http://some.domain.org/")
}}
}
BACKUP
delete=(
("?s","?p","?o"),
("?s3","?p3","?s"),
)
insert=(
("?s1","?p","?o"),
("?s1",NS.po.genericURI,"?s"),
("?s1",NS.po.snapshot,snapshot),
("?s3","?p3","?s1"),
)
where=(
("?s",a,NS.po.Participant),
("?s","?p","?o"),
("OPTIONAL","?s3","?p3","?s"),
)
startB3_=" SELECT ?s ?p ?o ?s3 ?p3 (uri(concat(?s,'--','%s') ) AS ?s1) {"%(snapshot,)
body3close_= " } } } "
querystring=P.sparql.functions.buildQuery(
triples1=delete,graph1=self.graphidAUX,
triples2=insert,graph2=self.graphidAUX,
triples3=where, graph3=self.graphidAUX,
body3modifier=startB3_,body3close_=body3close_,
method="delete_insert_where")
"""
import os
import rdflib as r, networkx as x, percolation as P
from SPARQLWrapper import SPARQLWrapper, JSON
c=P.utils.check  # shorthand for the project's debug/trace printer
NS=P.rdf.NS      # shared RDF namespace registry
a=NS.rdf.type    # conventional SPARQL abbreviation for rdf:type
# NOTE: `default` is assigned three times on purpose; only the final value
# ("default") is effective. The earlier URIs document alternatives that were
# tried against Fuseki (see the bug notes in the module docstring).
default="urn:x-arq:DefaultGraph"
default=NS.po.MainGraph+"#1"
default="default"
g=r.Graph()  # module-level scratch graph; currently unused below
#g.addN(P.rdf.makeMetadata())
class SparQLClient:
    """Fuseki connection maintainer through rdflib"""
    def __init__(self,endpoint_url):
        # SPARQLWrapper handle plus the raw URL (kept so the connection can
        # be re-established later).
        self.endpoint=SPARQLWrapper(endpoint_url)
        self.endpoint_url=endpoint_url
    def addLocalFileToEndpoint(self,tfile,tgraph=default):
        # Shells out to Jena's `s-post` tool to upload a local RDF file.
        # NOTE(review): the arguments are interpolated into a shell command
        # unescaped — do not pass untrusted file/graph names here.
        cmd="s-post {} {} {}".format(self.endpoint_url,tgraph,tfile)
        self.cmdline=cmd  # kept for debugging the last upload
        os.system(cmd)
    def removeLocalFileFromEndpoint(self,tfile,tgraph=default):
        # Counterpart of addLocalFileToEndpoint using Jena's `s-delete`.
        cmd="s-delete {} {} {}".format(self.endpoint_url,tgraph,tfile)
        os.system(cmd)
    def restablishConnection(self,endpoint_url=None):
        # (Name typo "restablish" preserved: it is part of the public API.)
        # Recreates the SPARQLWrapper; unlike __init__ this also presets the
        # HTTP method to POST and the return format to JSON.
        if not endpoint_url:
            endpoint_url=self.endpoint_url
        self.endpoint=SPARQLWrapper(endpoint_url)
        self.endpoint_url=endpoint_url
        self.endpoint.method = 'POST'
        self.endpoint.setReturnFormat(JSON)
class SparQLQueries:
    """Convenience mixin for inheritance with SparQLClient and SparQLLegacy.

    Expects the inheriting class to provide self.endpoint (a SPARQLWrapper),
    as SparQLClient does.
    """
    # NOTE(review): these are class-level mutable lists. `self.iquery+=[q]`
    # mutates the shared class list, so query history is shared across all
    # instances — presumably intentional as a global log; confirm before
    # changing.
    iquery=[]
    rquery=[]
    def clearEndpoint(self,tgraph=default):
        # Drop all triples from one graph, or from the default graph when
        # tgraph is falsy.
        if tgraph:
            query="CLEAR GRAPH <%s>"%(tgraph,)
        else:
            query="CLEAR DEFAULT"
        self.updateQuery(query)
    def addRemoteFileToEndpoint(self,remote_file_url,tgraph=default):
        # NOTE(review): never validated (see the raise below). Also, when
        # tgraph is falsy `part2` is unbound and the concatenation raises
        # NameError; and the query runs *before* NotImplementedError fires.
        part1="LOAD <%s> "%(remote_file_url)
        if tgraph:
            part2=" [ INTO <%s> ] "%(tgraph,)
        query=part1+part2
        self.updateQuery(query)
        raise NotImplementedError("Need to validate. Never been used")
    def insertTriples(self,triples,graph1=default):
        # Build and run an INSERT DATA query; the query text is logged in
        # iquery and the raw endpoint response stored in self.result.
        querystring=P.sparql.functions.buildQuery(triples,graph1=graph1,method="insert")
        self.iquery+=[querystring]
        self.result=self.updateQuery(querystring)
    def retrieveFromTriples(self,triples,graph1=default,modifier1="",startB_=None):
        # Build and run a SELECT query from triple patterns; logged in rquery.
        querystring=P.sparql.functions.buildQuery(triples,graph1=graph1,modifier1=modifier1,startB_=startB_)
        self.rquery+=[querystring]
        return self.retrieveQuery(querystring)
    def retrieveQuery(self,querystring):
        """Query for retrieving information (e.g. through select)"""
        self.endpoint.method="GET"
        self.endpoint.setReturnFormat(JSON)
        return self.performQuery(querystring)
    def updateQuery(self,querystring):
        """Query to insert, delete and modify knowledge https://www.w3.org/Submission/SPARQL-Update/"""
        self.endpoint.method="POST"
        return self.performQuery(querystring)
    def performQuery(self,querystring):
        """Query method is defined at SparQLClient initialization."""
        # self.method=POST
        self.endpoint.setQuery(querystring)
        return self.endpoint.queryAndConvert()
    def getAllTriples(self,graph1=default):
        # Fetch every (s, p, o) of the graph into self.triples.
        qtriples=(("?s", "?p", "?o"),)
        self.triples=P.sparql.functions.plainQueryValues(self.retrieveFromTriples(qtriples,graph1=graph1))
    def getNTriples(self,graph1=default):
        # Count the triples of the graph via COUNT(*) into self.ntriples.
        qtriples=(("?s", "?p", "?o"),)
        self.ntriples=P.sparql.functions.plainQueryValues(self.retrieveFromTriples(qtriples,graph1=graph1,startB_=" (COUNT(*) as ?nt) WHERE { "))[0]
    def insertOntology(self,graph1=default):
        # Load the project ontology triples into the given graph.
        self.insertTriples(P.rdf.makeOntology(),graph1=graph1)
    # self.getNTriples(), P.utils.writeTriples(self.triples,"{}dummy.ttl".format(triples_dir))
class SparQLLegacyConvenience:
    """Convenience mixin for querying and rendering analysis structures,
    tables and figures.

    Expects to be combined with SparQLClient and SparQLQueries (see
    LegacyClient), which provide the endpoint and the query helpers used
    throughout.
    """
    # Auxiliary named graphs used as scratch space during translation.
    graphidAUX=NS.po.AuxGraph+"#1"
    graphidAUX2=NS.po.AuxGraph+"#2"
    graphidAUX3=NS.po.AuxGraph+"#3"
    graphidAUX4=NS.po.AuxGraph+"#4"
    graphidAUX5=NS.po.AuxGraph+"#5"
    def __init__(self):
        # ontology_triples=P.rdf.makeOntology()
        # self.insertTriples(ontology_triples,self.graphidAUX) # SparQLQueries TTM
        # Snapshot the AUX graph so addTranslationFileToEndpoint can later
        # verify that it was restored to its original size.
        self.getAllTriples(self.graphidAUX)
        self.ntriplesAUX=len(self.triples)
        self.triplesAUX=self.triples
    def getSnapshots(self,snaphot_type=None):
        """Populate self.snapshots with snapshot URIs of the given type.

        (Parameter name typo "snaphot_type" preserved: callers may pass it
        by keyword.)
        """
        if not snaphot_type:
            uri=NS.po.Snapshot
        else:
            # NOTE(review): eval on a caller-supplied string; only use with
            # trusted snapshot type names.
            uri=eval("NS.po.{}Snapshot".format(snaphot_type.title()))
        # NS.po.InteractionSnapshot, NS.po.GmaneSnapshot
        triples=(("?snapshot", a, uri),)
        self.snapshots=P.sparql.functions.plainQueryValues(self.retrieveFromTriples(triples)) # SparQLQuery
    def addTranslatesFromSnapshots(self,snapshots=None):
        """Load the translate files of every snapshot (default: all known)."""
        if snapshots==None:
            if not hasattr(self,"snapshots"):
                self.getSnapshots()
            snapshots=self.snapshots
        # query each snapshot to get translates through ontology
        for snapshot in snapshots:
            self.addTranslatesFromSnapshot(snapshot)
    def addTranslatesFromSnapshot(self,snapshot):
        """Find a snapshot's translate files on disk and upload each one."""
        # Look up the translate file URIs and the local directory they live in.
        triples=(snapshot,NS.po.defaultXML,"?translate"),
        translates=P.sparql.functions.plainQueryValues(self.retrieveFromTriples(triples))
        triples=(snapshot,NS.po.localDir,"?localdir"),
        localdir=P.sparql.functions.plainQueryValues(self.retrieveFromTriples(triples))[0]
        self.tmp=locals()  # kept for interactive debugging
        # with the translates and the dir, load the translates
        c("into translates from snapshot")
        # P.utils.callDebugger()
        for translate in translates:
            fname=translate.split("/")[-1]
            fname2="{}/{}".format(localdir,fname)
            graphid=self.addTranslationFileToEndpoint(fname2,snapshot)
        # add the relation of po:associatedTranslate to the "graphs" graph
    def addTranslationFileToEndpoint(self,tfile,snapshot):
        """Upload a translate file via the AUX graph, tag its triples with the
        snapshot, move them to the default graph, and restore the AUX graph."""
        #http://purl.org/socialparticipation/po/AuxGraph#1
        self.addLocalFileToEndpoint(tfile,self.graphidAUX)
        c("copy intermediary triples in AUX")
        self.getAllTriples(self.graphidAUX)
        self.triplesAUXINT0=self.triples
        self.ntriplesAUXINT0=len(self.triples)
        c("first substitute")
        # Rewrite Participant URIs to snapshot-specific URIs (?s -> ?s1),
        # keeping a po:genericURI link back to the original.
        delete=(
            ("?s","?p","?o"),
            ("?s3","?p3","?s"),
        )
        insert=(
            ("?s1","?p","?o"),
            ("?s1",NS.po.genericURI,"?s"),
            ("?s1",NS.po.snapshot,snapshot),
            ("?s3","?p3","?s1"),
        )
        where=(
            ("?s",a,NS.po.Participant),
            ("?s","?p","?o"),
            ("OPTIONAL","?s3","?p3","?s"),
        )
        # startB3_=""" SELECT ?s ?p ?o ?s3 ?p3 (uri(concat(?s,'--','%s') ) AS ?s1) {"""%(snapshot,)
        bindline=" BIND(uri(concat(str(?s),'--','%s')) AS ?s1) "%(snapshot,)
        body3close_= " } "+bindline +" } "
        body3close_= bindline +" . } } "
        # NOTE(review): this query is built but never executed (the update
        # call below is commented out) — preserved as-is.
        querystring=P.sparql.functions.buildQuery(
            triples1=delete,graph1=self.graphidAUX,
            triples2=insert,graph2=self.graphidAUX,
            triples3=where, graph3=self.graphidAUX,
            body3close_=body3close_,
            method="delete_insert_where")
        #self.mquery2=querystring
        #self.updateQuery(querystring)
        c("second insert")
        # Tag every interaction instance with the snapshot it came from.
        insert=("?m",NS.po.snapshot,snapshot),
        where= ("?m",a,NS.po.InteractionInstance), # tw,gmane:message or fb interaction
        querystring=P.sparql.functions.buildQuery(triples1=insert,graph1=self.graphidAUX,
            triples2=where,graph2=self.graphidAUX,method="insert_where")
        self.updateQuery(querystring)
        c("copy intermediary triples in AUX")
        self.getAllTriples(self.graphidAUX)
        self.triplesAUXINT=self.triples
        self.ntriplesAUXINT=len(self.triples)
        c("graph move")
        # Move the snapshot-tagged triples out of AUX. The combined
        # delete+insert query is only recorded (see module docstring on the
        # Fuseki bug); the executed query just deletes them from AUX.
        delete=("?s","?p","?o"), # aux
        insert=("?s","?p","?o"), # DEFAULT
        where=(
            ("?s",NS.po.snapshot,snapshot), # aux
            ("?s","?p","?o"),
        )
        querystring=P.sparql.functions.buildQuery(
            triples1=delete,graph1=self.graphidAUX,
            triples2=insert,#graph2=default,#graph2="DEFAULT",
            triples3=where,graph3=self.graphidAUX,
            method="delete_insert_where")
        #self.updateQuery(querystring)
        self.mquery=querystring
        querystring=P.sparql.functions.buildQuery(
            triples1=delete,graph1=self.graphidAUX,
            triples2=where,graph2=self.graphidAUX,
            method="delete_where")
        self.updateQuery(querystring)
        self.mqueryd=querystring
        c("delete trash (symmetric property and metafile for now)")
        # NOTE(review): the first of these two cleanup queries is built but
        # overwritten before being executed — preserved as-is.
        delete=("?s","?p","?o"),
        where=(
            ("?s","?p",NS.owl.SymmetricProperty),
            ("?s","?p","?o"),
        )
        querystring=P.sparql.functions.buildQuery(triples1=delete,graph1=self.graphidAUX,
            triples2=where,graph2=self.graphidAUX,
            method="delete_where")
        delete=("?s","?p","?o"),
        where=(
            (snapshot,"?p","?o"),
            ("?s","?p","?o"),
        )
        querystring=P.sparql.functions.buildQuery(triples1=delete,graph1=self.graphidAUX,
            triples2=where,graph2=self.graphidAUX,
            method="delete_where")
        self.updateQuery(querystring)
        self.getNTriples(self.graphidAUX)
        if self.ntriples==self.ntriplesAUX:
            c("graphAUX restored correctly")
        else:
            c("somethig went wrong in restoring graphidAUX, keeping record")
            self.ntriplesAUX2=self.ntriples
            self.getAllTriples(self.graphidAUX)
            self.triplesAUX2=self.triples
        c("insert file path of translation to default graph and finish")
        triples=(snapshot,NS.po.translateFilePath,tfile),
        self.insertTriples(triples)
        # if empty afterwards, make dummy inference graph to copy triples from or load rdfs file
    def addMetafileToEndpoint(self,tfile):
        """Upload a snapshot metadata file and register its provenance."""
        self.addLocalFileToEndpoint(tfile) # SparQLQueries
        snapshoturi=[i for i in P.sparql.functions.performFileGetQuery(tfile,(("?s",a,NS.po.Snapshot),))][0][0]
        snapshotsubclass=P.utils.identifyProvenance(tfile)
        triples=(
            (snapshoturi,a,snapshotsubclass), # Gmane, FB, TW, ETC
            (snapshoturi,NS.po.localDir,os.path.dirname(tfile)),
            (snapshoturi,NS.po.metaFilePath,tfile),
        )
        self.insertTriples(triples) # SparQLQueries
    def makeNetwork(self,relation_uri,label_uri=None,rtype=1,directed=False):
        """Make network from data SparQL queried in endpoint_url.
        relation_uri hold the predicate uri to which individuals are the range or both range and domain.
        label_uri hold the predicate to which the range is the label (e.g. name or nick) of the individual.
        rtype indicate which type of structure to be queried, as exposed in:
        http://ttm.github.io/doc/semantic/report/2015/12/05/semantic-social-networks.html
        directed indicates whether the resulting network is a digraph or not."""
        sparql=self.endpoint
        if label_uri:
            mvars="i1","l1","i2","l2"
            label_qpart="""?i1 {} ?l1 .
            ?i2 {} ?l2 .""".format(label_uri,label_uri)
        else:
            mvars="i1","i2"
            label_qpart=""
        # Bug fix: the variable list was built as literal "?{}" placeholders
        # that never got filled, producing an invalid SELECT clause.
        tvars=" ".join("?{}".format(i) for i in mvars)
        if rtype==1: # direct relation
            query="""SELECT {}
            WHERE {{ ?i1 {} ?i2 .
            {} }}""".format(tvars,relation_uri,label_qpart)
        elif rtype==2: # mediated relation
            query="""SELECT {}
            WHERE {{ ?foo {} ?i1 .
            ?foo {} ?i2 .
            {} }}""".format(tvars,relation_uri,relation_uri,label_qpart)
        elif rtype==3: # twice mediated relation
            query="""SELECT {}
            WHERE {{ ?foo ?baz ?bar .
            ?foo {} ?i1 .
            ?bar {} ?i2 .
            {} }}""".format(tvars,relation_uri,relation_uri,label_qpart)
        else:
            raise ValueError("rtype --> {} <-- not valid".format(rtype))
        c("query build ok")
        res=P.utils.mQuery(sparql,query,mvars)
        c("response received")
        if directed:
            dg=x.DiGraph()
        else:
            dg=x.Graph()
        for rel in res:
            # Bug fix: without label_uri each result row only has the two
            # endpoint variables, so the 4-way unpack raised ValueError.
            if label_uri:
                id1,l1,id2,l2=rel
            else:
                id1,id2=rel
                l1=l2=None
            if dg.has_node(id1): dg.node[id1]["weight"]+=1.
            else: dg.add_node(id1,label=l1,weight=1.)
            if dg.has_node(id2): dg.node[id2]["weight"]+=1.
            else: dg.add_node(id2,label=l2,weight=1.)
            if dg.has_edge(id1,id2): dg[id1][id2]["weight"]+=1.
            else: dg.add_edge(id1,id2,weight=2.)
        c("graph done")
        return dg
class Client(SparQLClient, SparQLQueries):
    """SparQL endpoint connection combined with the generic query helpers."""

    def __init__(self, endpoint_url):
        SparQLClient.__init__(self, endpoint_url)
class LegacyClient(SparQLClient, SparQLQueries, SparQLLegacyConvenience):
    """Endpoint connection plus query helpers plus the legacy conveniences
    for rendering analysis structures, tables and figures."""

    def __init__(self, endpoint_url):
        SparQLClient.__init__(self, endpoint_url)
        SparQLLegacyConvenience.__init__(self)
|
mit
|
bischjer/auxiliary
|
aux/engine/actor/reactor.py
|
3
|
1565
|
from multiprocessing import Process, Pipe
from aux.engine.actor.base import BaseActor
import select
class Reactor(BaseActor):
def __init__(self, name, looper="select"):
self.parent = super(Reactor, self)
self.parent.__init__(name)
self.callbacks = list()
self.shouldStop = False
self.p1_in, self.p1_out = Pipe()
self.p2_in, self.p2_out = Pipe()
self.select_timeout = 0.5
def add_callback(self, callback):
self.callbacks.append(callback)
def mainloop(self, select_time_out, p_in, p_out):
should_run = True
reads = list()
writes = list()
exceptions = list()
while ( should_run ):
data = p_in.recv()
if 'should_run=False' in data:
should_run = False
select.select(reads,
writes,
exceptions,
self.select_timeout)
# p_out.send("reactor send")
# should_run = False
def start(self):
reactor_process = Process(target=self.mainloop,
args=(self.select_timeout,
self.p1_out,
self.p2_in))
reactor_process.daemon = True
reactor_process.start()
self.parent.set_running(True)
def stop(self):
self.p1_in.send('should_run=False')
print self.p2_out.recv()
self.parent.set_running(False)
|
bsd-3-clause
|
msvbhat/distaf
|
tests_d/example/test_basic_gluster_tests.py
|
1
|
1966
|
# This file is part of DiSTAF
# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from distaf.util import tc, testcase
from distaf.distaf_base_class import DistafTestClass
from distaf.mount_ops import mount_volume, umount_volume
from distaf.volume_ops import setup_vol, stop_volume, delete_volume
@testcase("gluster_basic_test")
class gluster_basic_test(DistafTestClass):
    """
    runs_on_volumes: ALL
    runs_on_protocol: [ glusterfs, nfs ]
    reuse_setup: True
    """
    def run(self):
        # Smoke test: mount the volume on the first client, copy /etc into
        # it, then clean up. Returns True on success, False otherwise.
        _rc = True
        client = self.clients[0]
        tc.run(self.mnode, "gluster volume status %s" % self.volname)
        ret, _, _ = mount_volume(self.volname, self.mount_proto, \
                self.mountpoint, mclient=client)
        if ret != 0:
            tc.logger.error("Unable to mount the volume %s in %s" \
                    "Please check the logs" % (self.volname, client))
            return False
        # Exercise the mount with a recursive copy of /etc.
        ret, _, _ = tc.run(client, "cp -r /etc %s" % self.mountpoint)
        if ret != 0:
            tc.logger.error("cp failed in %s. Please check the logs" % client)
            _rc = False
        # Best-effort cleanup regardless of the copy result.
        tc.run(client, "rm -rf %s/etc" % self.mountpoint)
        umount_volume(client, self.mountpoint)
        return _rc
|
gpl-2.0
|
zsdonghao/tensorlayer
|
examples/basic_tutorials/tutorial_mnist_simple.py
|
1
|
2087
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Minimal TensorLayer MNIST example: a 784-800-800-10 MLP with dropout,
# trained with Adam and softmax cross-entropy.
import tensorflow as tf
import tensorlayer as tl
tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)
sess = tf.InteractiveSession()
# prepare data
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))
# define placeholder
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.int64, shape=[None], name='y_')
# define the network
network = tl.layers.InputLayer(x, name='input')
network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu1')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2')
network = tl.layers.DenseLayer(network, 800, tf.nn.relu, name='relu2')
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop3')
# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
# speed up computation, so we use identity here.
# see tf.nn.sparse_softmax_cross_entropy_with_logits()
network = tl.layers.DenseLayer(network, n_units=10, act=None, name='output')
# define cost function and metric.
y = network.outputs
cost = tl.cost.cross_entropy(y, y_, name='cost')
correct_prediction = tf.equal(tf.argmax(y, 1), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
y_op = tf.argmax(tf.nn.softmax(y), 1)
# define the optimizer
train_params = network.all_params
train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost, var_list=train_params)
# initialize all variables in the session
tl.layers.initialize_global_variables(sess)
# print network information
network.print_params()
network.print_layers()
# train the network (dropout is enabled/disabled by tl.utils.fit internally)
tl.utils.fit(sess, network, train_op, cost, X_train, y_train, x, y_, acc=acc, batch_size=500, \
    n_epoch=500, print_freq=5, X_val=X_val, y_val=y_val, eval_train=False)
# evaluation
tl.utils.test(sess, network, acc, X_test, y_test, x, y_, batch_size=None, cost=cost)
# save the network to .npz file
tl.files.save_npz(network.all_params, name='model.npz')
sess.close()
|
apache-2.0
|
iivic/BoiseStateX
|
lms/djangoapps/shoppingcart/processors/helpers.py
|
169
|
1025
|
"""
Helper methods for credit card processing modules.
These methods should be shared among all processor implementations,
but should NOT be imported by modules outside this package.
"""
from django.conf import settings
from microsite_configuration import microsite
def get_processor_config():
    """
    Return a dictionary of configuration settings for the active credit card processor.
    If we're in a microsite and overrides are available, return those instead.
    Returns:
        dict
    """
    # Configuration for the processor selected by CC_PROCESSOR_NAME.
    processor_config = settings.CC_PROCESSOR.get(settings.CC_PROCESSOR_NAME, {})
    # Microsites may override the processor settings; overrides live under
    # the 'microsites' sub-key of the normal processor configuration.
    override_key = microsite.get_value('cybersource_config_key')
    if override_key:
        return processor_config['microsites'][override_key]
    return processor_config
|
agpl-3.0
|
C-Blu/npyscreen
|
build/lib/npyscreen/stdfmemail.py
|
15
|
9374
|
import curses
import weakref
import npyscreen
import email
import mimetypes
import os.path
class EmailTreeLine(npyscreen.TreeLine):
    """Tree-line widget used by EmailTree to render one message-part node."""

    def display_value(self, vl):
        # The node value is displayed as-is. The original body contained an
        # unreachable content-type branch after this return; it has been
        # removed (behavior unchanged).
        return vl
class EmailTree(npyscreen.MultiLineTreeNew):
    """Tree widget listing the MIME parts of the email being viewed."""
    _contained_widgets = EmailTreeLine
    def display_value(self, vl):
        # Show each node as the MIME content type of its message part.
        return vl.getContent().get_content_type()
        #return vl.get_content_type()
    def h_select(self, ch):
        # Multipart nodes just advance the cursor; leaf nodes hand the part
        # to the parent form for display and hide the tree.
        if self.values[self.cursor_line].hasChildren():
            self.cursor_line += 1
            return False
        try:
            value = [weakref.proxy(self.values[self.cursor_line]),]
        except TypeError:
            # Actually, this is inefficient, since with the NPSTree class (default) we will always be here - since by default we will
            # try to create a weakref to a weakref, and that will fail with a type-error. BUT we are only doing it on a keypress, so
            # it shouldn't create a huge performance hit, and is future-proof. Code replicated in h_select_exit
            value = [self.values[self.cursor_line],]
        self.parent.when_select_part(value)
        self.editing = False
        self.how_exited=npyscreen.wgwidget.EXITED_UP
        self.hidden = True
    def h_select_exit(self, ch):
        self.h_select(ch)
    def set_up_handlers(self):
        super(EmailTree, self).set_up_handlers()
        self.handlers.update({
            ord('s'): self.h_save_message_part,
        })
    def h_save_message_part(self, ch):
        # Delegate saving to the parent form, then notify the user.
        self.parent.saveMessagePart()
        npyscreen.notify_wait("Message part saved to your downloads folder: \n %s" % self.parent.DOWNLOAD_DIR)
npyscreen.notify_wait("Message part saved to your downloads folder: \n %s" % self.parent.DOWNLOAD_DIR)
class EmailPager(npyscreen.Pager):
    """Pager showing the decoded body of the selected message part."""
    def set_up_handlers(self):
        super(EmailPager, self).set_up_handlers()
        # Several keys all return the user to the message-part tree.
        self.handlers.update({
            curses.KEY_LEFT: self.h_exit_tree,
            ord('s'): self.h_save_message_part,
            ord('x'): self.h_exit_tree,
            ord('q'): self.h_exit_tree,
            curses.ascii.ESC: self.h_exit_tree,
        })
    def h_exit_tree(self, ch):
        # Stop editing the pager and show the tree again.
        self.editing = False
        self.how_exited = True
        self.parent.when_show_tree(ch)
    def h_save_message_part(self, ch):
        # Delegate saving to the parent form, then notify the user.
        self.parent.saveMessagePart()
        npyscreen.notify_wait("Message part saved to your downloads folder: \n %s" % self.parent.DOWNLOAD_DIR)
class EmailViewFm(npyscreen.SplitFormWithMenus):
    """Form that displays one email: headers at the top, then either the
    MIME-part tree or the decoded body of a selected part, plus a menu for
    header views and saving parts."""
    BLANK_COLUMNS_RIGHT= 1
    # Bug fix: the original tuple was ('bcc' 'date') — a missing comma made
    # Python concatenate the literals into the bogus header 'bccdate'.
    SHORT_HEADER_LIST = ('from', 'to', 'cc', 'bcc', 'date', 'subject', 'reply-to')
    DOWNLOAD_DIR = os.path.expanduser("~/Downloads")
    def setEmail(self, this_email):
        """Display a new email.message.Message, resetting all widgets."""
        #Clear everything
        self.this_email = this_email
        self.wSubject.value = ""
        self.wFrom.value = ""
        self.wDate.value = ""
        self.wEmailBody.values = []
        self.wStatusLine.value = ""
        self.wEmailBody.hidden = True
        self.wEmailBody.start_display_at = 0
        self.wMessageTree.hidden = False
        self.wMessageTree.cursor_line = 0
        self.updateEmailTree()
        self.wSubject.value = this_email['subject']
        self.wFrom.value = this_email['from']
        self.wDate.value = this_email['date']
    def setValue(self, this_email):
        # Alias so the form can be used where a setValue API is expected.
        return self.setEmail(this_email)
    def updateEmailTree(self):
        # Rebuild the MIME-part tree from the current email.
        self._parse_email_tree(self.this_email)
        self.wMessageTree.values = self._this_email_tree
    def set_up_handlers(self):
        super(EmailViewFm, self).set_up_handlers()
        self.handlers.update({
        })
    def create(self):
        """Lay out the menu, header fields, body pager, tree and status line."""
        self.m1 = self.add_menu(name="Read Email")
        self.m1.addItemsFromList([
            ('View Short Headers', self.viewShortHeaders),
            ('View Full Headers', self.viewAllHeaders),
            ('View Message Tree', self.viewMessageTree),
            ('Save this Message Part', self.saveMessagePart),
            ('View Message Source', self.viewMessageSource),
        ])
        self.nextrely = 1
        self.wSubject = self.add(npyscreen.TitleText, begin_entry_at=10, editable=False,
            use_two_lines=False, name = "Subject:")
        self.wFrom = self.add(npyscreen.TitleText, begin_entry_at=10,
            editable=False, name = "From:", ) #max_width=-8)
        self.wDate = self.add(npyscreen.TitleText, begin_entry_at=10,
            editable=False, name = "Date:")
        self.draw_line_at = self.nextrely
        self.nextrely += 1
        # Pager and tree share the same screen area; only one is visible.
        _body_rely = self.nextrely
        self.wEmailBody = self.add(EmailPager, max_height=-1, scroll_exit=True, hidden=True)
        self.nextrely = _body_rely
        self.wMessageTree = self.add(EmailTree, max_height=-1, scroll_exit=True, hidden=False)
        self.nextrely += 1
        self.wStatusLine = self.add(npyscreen.FixedText,
            editable=False,
            use_max_space=True,
            color='STANDOUT',
            value="Status Line-Status Line-Status Line-Status Line-Status Line-Status Line-Status Line-")
    def _parse_email_tree(self, this_email):
        "Create an NPSTree representation of the email."
        self._this_email_tree = npyscreen.NPSTreeData(content=this_email, ignoreRoot=False)
        if this_email.is_multipart():
            for part in this_email.get_payload():
                self._tree_add_children(self._this_email_tree, part)
    def _tree_add_children(self, tree_node, this_message_part):
        # Recursively add one message part (and its sub-parts) to the tree.
        use_part = this_message_part
        this_child = tree_node.newChild(content=use_part)
        try:
            if use_part.is_multipart():
                for part in use_part.get_payload():
                    self._tree_add_children(this_child, part)
        except AttributeError:
            # Dealing with a string only, not a message.
            pass
    def when_select_part(self, vl):
        # Called by EmailTree: show the decoded body of the selected part.
        self.wEmailBody.hidden = False
        self.wEmailBody.setValuesWrap(vl[0].getContent().get_payload(decode=True).decode(errors='replace').split("\n"))
        self.wEmailBody.start_display_at = 0
        self.wMessageTree.hidden = True
    def when_show_tree(self, vl):
        # Called by EmailPager: hide the body and show the part tree again.
        if self.wMessageTree.hidden:
            self.wEmailBody.hidden = True
            if self.wEmailBody.editing:
                self.wEmailBody.h_exit_tree(vl)
            self.wMessageTree.hidden = False
        self.wStatusLine.value = ""
        self.display()
    def viewShortHeaders(self,):
        """Pop up the common headers (From/To/Cc/... per SHORT_HEADER_LIST)."""
        s_header_list = []
        for headers in self.SHORT_HEADER_LIST:
            these_headers = self.this_email.get_all(headers)
            if these_headers:
                for h in these_headers:
                    s_header_list.append(str(headers).capitalize() + ": " + h.strip())
        npyscreen.notify_confirm(s_header_list, wide=True, wrap=False)
    def saveMessagePart(self, vl=None):
        """Save the selected part (default: tree cursor) to DOWNLOAD_DIR;
        multipart parts are saved recursively."""
        if vl is None:
            vl = self.wMessageTree.values[self.wMessageTree.cursor_line].getContent()
        if vl.is_multipart():
            for v in vl.get_payload():
                self.saveMessagePart(v)
        else:
            self._savePartToFile(vl)
    def _savePartToFile(self, messagePart):
        # Derive a safe, non-clashing filename and write the decoded payload.
        fn = messagePart.get_filename()
        counter = 0
        if not fn:
            ext = mimetypes.guess_extension(messagePart.get_content_type()) # Bug in python returns .ksh for text/plain. Wait for python fix?
            if not ext:
                # generic extension?
                ext = '.bin'
            fn = 'emailpart%s' % (ext)
        fn = os.path.basename(fn) # Sanitize Filename.
        attempted_filename = fn
        while os.path.exists(os.path.join(self.DOWNLOAD_DIR, attempted_filename)):
            counter += 1
            attempted_filename = "%s%s%s" % (os.path.splitext(fn)[0], counter, os.path.splitext(fn)[1])
        fn = attempted_filename
        fqfn = os.path.join(self.DOWNLOAD_DIR, fn)
        if messagePart.get_content_maintype() == "text":
            # Bug fix: get_payload(decode=True) returns bytes; writing bytes
            # to a text-mode file raises TypeError on Python 3. Decode first,
            # matching the replacement strategy used in when_select_part().
            with open(fqfn, 'w') as f:
                f.write(messagePart.get_payload(decode=True).decode(errors='replace'))
        else:
            with open(fqfn, 'wb') as f:
                f.write(messagePart.get_payload(decode=True))
    def viewAllHeaders(self,):
        """Pop up every header present on the message."""
        s_header_list = []
        for headers in list(self.this_email.keys()):
            these_headers = self.this_email.get_all(headers)
            if these_headers:
                for h in these_headers:
                    s_header_list.append(str(headers).capitalize() + ": " + h.strip())
        npyscreen.notify_confirm(s_header_list, wide=True, wrap=True)
    def viewMessageTree(self,):
        # Return from the body pager to the tree view.
        self.wEmailBody.h_exit_tree(None)
        self.wEmailBody.hidden = True
    def viewMessageSource(self,):
        npyscreen.notify_confirm(self.this_email.as_string(), wide=True)
|
bsd-2-clause
|
netscaler/horizon
|
openstack_dashboard/dashboards/project/networks/forms.py
|
11
|
2268
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class UpdateNetwork(forms.SelfHandlingForm):
    """Self-handling form that renames a network and toggles its admin state."""
    name = forms.CharField(label=_("Name"), required=False)
    tenant_id = forms.CharField(widget=forms.HiddenInput)
    # Network ID is shown read-only; it identifies the record to update.
    network_id = forms.CharField(label=_("ID"),
                                 widget=forms.TextInput(
                                     attrs={'readonly': 'readonly'}))
    admin_state = forms.BooleanField(label=_("Admin State"), required=False)
    # Redirect target when the update fails.
    failure_url = 'horizon:project:networks:index'
    def handle(self, request, data):
        # Apply the new name/admin state via the Neutron API; on success the
        # updated network object is returned, on failure the user is sent
        # back to the index view with an error message.
        try:
            params = {'admin_state_up': data['admin_state'],
                      'name': data['name']}
            network = api.neutron.network_modify(request, data['network_id'],
                                                 **params)
            msg = _('Network %s was successfully updated.') % data['name']
            LOG.debug(msg)
            messages.success(request, msg)
            return network
        except Exception:
            msg = _('Failed to update network %s') % data['name']
            LOG.info(msg)
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
|
apache-2.0
|
calebmadrigal/radio-hacking-scripts
|
auto_crop_signal.py
|
1
|
2186
|
import sys
import scipy
import numpy as np
def read_pcm_file(file_path, file_type=np.complex64):
    """Read a raw PCM/IQ capture file into a numpy array.

    file_path: path of the raw sample file.
    file_type: numpy dtype of the stored samples (default complex64, i.e.
        interleaved 32-bit float I/Q pairs; same value as the old
        ``scipy.complex64`` default, which was just a NumPy alias).
    Returns a 1-D numpy array of samples.
    """
    # np.fromfile replaces the deprecated/removed scipy.fromfile alias;
    # the behavior (raw binary read) is identical.
    with open(file_path, 'rb') as f:
        return np.fromfile(f, dtype=file_type)
def write_pcm_file(file_path, signal_data, file_type='complex64'):
    """Write samples to a raw PCM/IQ file.

    file_path: destination path.
    signal_data: iterable/array of samples.
    file_type: numpy dtype (name or dtype object) to store the samples as.
        Bug fix: this argument used to be accepted but ignored (the data
        was always written as complex64); it is now honored, with the same
        default so existing callers see no change.
    """
    np.array(signal_data).astype(file_type).tofile(file_path)
def auto_crop_signal(signal_data, margin_percent=5, num_chunks=16):
    """Crop a signal down to its loud (burst) region.

    Break the signal into ``num_chunks`` chunks, and find the chunk where
    there is the largest jump from quiet to loud (start index), and the
    largest jump from loud to quiet (stop index). The crop is then padded
    on each side by ``margin_percent`` percent of the detected burst length.

    signal_data: sequence of (possibly complex) samples.
    Returns the cropped slice of ``signal_data``.
    """
    chunk_size = int(len(signal_data) / num_chunks)
    largest_increase_index = 0
    largest_increase_size = -999999999
    largest_decrease_index = chunk_size * num_chunks
    largest_decrease_size = 999999999
    last_chunk_sum = sum([abs(i) for i in signal_data[0:chunk_size]])
    for chunk_start in range(0, len(signal_data), chunk_size):
        chunk = signal_data[chunk_start:chunk_start+chunk_size]
        # Don't consider the last chunk if it's not a full chunk,
        # since that will likely yield the smallest sum
        if len(chunk) < chunk_size:
            continue
        chunk_sum = sum([abs(i) for i in chunk])
        chunk_diff = chunk_sum - last_chunk_sum
        last_chunk_sum = chunk_sum
        if chunk_diff > largest_increase_size:
            largest_increase_size = chunk_diff
            largest_increase_index = chunk_start
        if chunk_diff < largest_decrease_size:
            largest_decrease_size = chunk_diff
            largest_decrease_index = chunk_start
    margin = int((largest_decrease_index - largest_increase_index) * (margin_percent / 100))
    # Bug fix: clamp the padded start at 0. A negative slice start would
    # wrap around to the end of the sequence and return an empty (or
    # garbled) crop when the burst begins near the start of the capture.
    start = max(0, largest_increase_index - margin)
    return signal_data[start:largest_decrease_index + margin]
if __name__ == '__main__':
    # Command-line entry point: read a raw IQ capture, auto-crop it to the
    # detected burst, and write the cropped samples back out.
    try:
        in_file_path = sys.argv[1]
        out_file_path = sys.argv[2]
    except IndexError:
        print('Usage: python auto_crop_signal.py <in file> <out file>')
        sys.exit(1)
    signal_data = read_pcm_file(in_file_path)
    cropped_signal = auto_crop_signal(signal_data)
    write_pcm_file(out_file_path, cropped_signal)
    print('Wrote auto-cropped signal to:', out_file_path)
|
mit
|
abhishekgahlot/scikit-learn
|
sklearn/externals/joblib/memory.py
|
50
|
36180
|
"""
A context object for caching a function's return value each time it
is called with the same input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import with_statement
import os
import shutil
import time
import pydoc
import re
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import traceback
import warnings
import inspect
import json
import weakref
# Local imports
from . import hashing
from .func_inspect import get_func_code, get_func_name, filter_args
from .func_inspect import format_signature, format_call
from .logger import Logger, format_time, pformat
from . import numpy_pickle
from .disk import mkdirp, rm_subdirs
from ._compat import _basestring
# Marker line written at the top of each stored func_code.py so the original
# source line number of the function survives a cache round-trip
# (see extract_first_line / MemorizedFunc._write_func_code).
FIRST_LINE_TEXT = "# first line:"
# TODO: The following object should have a data store object as a sub
# object, and the interface to persist and query should be separated in
# the data store.
#
# This would enable creating 'Memory' objects with a different logic for
# pickling that would simply span a MemorizedFunc with the same
# store (or do we want to copy it to avoid cross-talks?), for instance to
# implement HDF5 pickling.
# TODO: Same remark for the logger, and probably use the Python logging
# mechanism.
def extract_first_line(func_code):
    """Split the "# first line:" marker off stored function source.

    Returns ``(code_text, line_number)``; ``line_number`` is -1 when the
    marker is not present.
    """
    first_line = -1
    if func_code.startswith(FIRST_LINE_TEXT):
        marker, _, remainder = func_code.partition('\n')
        first_line = int(marker[len(FIRST_LINE_TEXT):])
        func_code = remainder
    return func_code, first_line
class JobLibCollisionWarning(UserWarning):
    """ Warn that there might be a collision between names of functions:
    two distinct callables may map to the same cache location (e.g. a
    function redefined at a different line, or lambdas), so cached
    results could be attributed to the wrong function.
    """
def _get_func_fullname(func):
    """Return the cache path fragment for *func*: its module path parts
    joined with its name (see _cache_key_to_dir() for how it is used).
    """
    path_parts, name = get_func_name(func)
    return os.path.join(*(path_parts + [name]))
def _cache_key_to_dir(cachedir, func, argument_hash):
    """Compute directory associated with a given cache key.

    func can be a function or a string as returned by _get_func_fullname().
    When argument_hash is None, only the per-function directory is returned.
    """
    if isinstance(func, _basestring):
        func_part = func
    else:
        func_part = _get_func_fullname(func)
    if argument_hash is None:
        return os.path.join(cachedir, func_part)
    return os.path.join(cachedir, func_part, argument_hash)
def _load_output(output_dir, func_name, timestamp=None, metadata=None,
                 mmap_mode=None, verbose=0):
    """Load output of a computation.

    Reads ``output.pkl`` from *output_dir* and returns the unpickled value,
    optionally printing a progress message whose detail grows with
    *verbose*. Raises KeyError when the cache entry file is missing
    (e.g. it has been cleared).
    """
    if verbose > 1:
        signature = ""
        try:
            if metadata is not None:
                # Reconstruct a readable call signature from the recorded
                # input arguments (best effort; KeyError is swallowed below).
                args = ", ".join(['%s=%s' % (name, value)
                                  for name, value
                                  in metadata['input_args'].items()])
                signature = "%s(%s)" % (os.path.basename(func_name),
                                        args)
            else:
                signature = os.path.basename(func_name)
        except KeyError:
            pass
        # Elapsed time since the memory context was created, if known.
        if timestamp is not None:
            t = "% 16s" % format_time(time.time() - timestamp)
        else:
            t = ""
        if verbose < 10:
            print('[Memory]%s: Loading %s...' % (t, str(signature)))
        else:
            # At very high verbosity, also show where the value came from.
            print('[Memory]%s: Loading %s from %s' % (
                t, str(signature), output_dir))
    filename = os.path.join(output_dir, 'output.pkl')
    if not os.path.isfile(filename):
        raise KeyError(
            "Non-existing cache value (may have been cleared).\n"
            "File %s does not exist" % filename)
    return numpy_pickle.load(filename, mmap_mode=mmap_mode)
# An in-memory store to avoid looking at the disk-based function
# source code to check if a function definition has changed.
# Weak keys so decorated functions can still be garbage-collected.
_FUNCTION_HASHES = weakref.WeakKeyDictionary()
###############################################################################
# class `MemorizedResult`
###############################################################################
class MemorizedResult(Logger):
    """Object representing a cached value.

    Attributes
    ----------
    cachedir: string
        path to root of joblib cache
    func: function or string
        function whose output is cached. The string case is intended only for
        instanciation based on the output of repr() on another instance.
        (namely eval(repr(memorized_instance)) works).
    argument_hash: string
        hash of the function arguments
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        The memmapping mode used when loading from cache numpy arrays. See
        numpy.load for the meaning of the different values.
    verbose: int
        verbosity level (0 means no message)
    timestamp, metadata: string
        for internal use only
    """
    def __init__(self, cachedir, func, argument_hash,
                 mmap_mode=None, verbose=0, timestamp=None, metadata=None):
        Logger.__init__(self)
        # Accept either a function object or the precomputed path fragment
        # (the latter supports eval(repr(instance)) round-trips).
        if isinstance(func, _basestring):
            self.func = func
        else:
            self.func = _get_func_fullname(func)
        self.argument_hash = argument_hash
        self.cachedir = cachedir
        self.mmap_mode = mmap_mode
        self._output_dir = _cache_key_to_dir(cachedir, self.func,
                                             argument_hash)
        if metadata is not None:
            self.metadata = metadata
        else:
            self.metadata = {}
        # Best-effort read of the on-disk metadata; no error is relevant
        # here. Bug fix: the bare ``except:`` was narrowed to Exception so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        try:
            with open(os.path.join(self._output_dir, 'metadata.json'),
                      'rb') as f:
                self.metadata = json.load(f)
        except Exception:
            pass
        self.duration = self.metadata.get('duration', None)
        self.verbose = verbose
        self.timestamp = timestamp

    def get(self):
        """Read value from cache and return it."""
        return _load_output(self._output_dir, _get_func_fullname(self.func),
                            timestamp=self.timestamp,
                            metadata=self.metadata, mmap_mode=self.mmap_mode,
                            verbose=self.verbose)

    def clear(self):
        """Clear value from cache"""
        shutil.rmtree(self._output_dir, ignore_errors=True)

    def __repr__(self):
        return ('{class_name}(cachedir="{cachedir}", func="{func}", '
                'argument_hash="{argument_hash}")'.format(
                    class_name=self.__class__.__name__,
                    cachedir=self.cachedir,
                    func=self.func,
                    argument_hash=self.argument_hash
                    ))

    def __reduce__(self):
        # Timestamp and metadata are deliberately dropped so pickles stay
        # stable across sessions; they are recovered lazily on unpickling.
        return (self.__class__, (self.cachedir, self.func, self.argument_hash),
                {'mmap_mode': self.mmap_mode})
class NotMemorizedResult(object):
    """Stand-in for MemorizedResult when caching is disabled.

    Wraps a plain in-memory value behind the same get()/clear() API, so
    callers need not care whether a cache is active.
    """
    __slots__ = ('value', 'valid')

    def __init__(self, value):
        self.valid = True
        self.value = value

    def get(self):
        """Return the stored value, or raise KeyError after clear()."""
        if not self.valid:
            raise KeyError("No value stored.")
        return self.value

    def clear(self):
        """Drop the stored value; subsequent get() calls raise KeyError."""
        self.value = None
        self.valid = False

    def __repr__(self):
        if not self.valid:
            return self.__class__.__name__ + ' with no value'
        return '{class_name}({value})'.format(
            class_name=self.__class__.__name__,
            value=pformat(self.value))

    # __getstate__ and __setstate__ are required because of __slots__
    def __getstate__(self):
        return {"valid": self.valid, "value": self.value}

    def __setstate__(self, state):
        self.valid = state["valid"]
        self.value = state["value"]
###############################################################################
# class `NotMemorizedFunc`
###############################################################################
class NotMemorizedFunc(object):
    """No-op object decorating a function.

    Used in place of MemorizedFunc when there is no cache directory:
    calls go straight through to the wrapped function and nothing is
    written to disk.

    Attributes
    ----------
    func: callable
        Original undecorated function.
    """
    # Should be a light as possible (for speed)

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

    def call_and_shelve(self, *args, **kwargs):
        # Mirror MemorizedFunc.call_and_shelve, but keep the value in memory.
        return NotMemorizedResult(self.func(*args, **kwargs))

    def __reduce__(self):
        return (self.__class__, (self.func,))

    def __repr__(self):
        return '{0}(func={1})'.format(type(self).__name__, self.func)

    def clear(self, warn=True):
        # Argument "warn" is for compatibility with MemorizedFunc.clear
        pass
###############################################################################
# class `MemorizedFunc`
###############################################################################
class MemorizedFunc(Logger):
    """ Callable object decorating a function for caching its return value
    each time it is called.

    All values are cached on the filesystem, in a deep directory
    structure. Methods are provided to inspect the cache or clean it.

    Attributes
    ----------
    func: callable
        The original, undecorated, function.
    cachedir: string
        Path to the base cache directory of the memory context.
    ignore: list or None
        List of variable names to ignore when choosing whether to
        recompute.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        The memmapping mode used when loading from cache
        numpy arrays. See numpy.load for the meaning of the different
        values.
    compress: boolean, or integer
        Whether to zip the stored data on disk. If an integer is
        given, it should be between 1 and 9, and sets the amount
        of compression. Note that compressed arrays cannot be
        read by memmapping.
    verbose: int, optional
        The verbosity flag, controls messages that are issued as
        the function is evaluated.
    """
    #-------------------------------------------------------------------------
    # Public interface
    #-------------------------------------------------------------------------

    def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
                 compress=False, verbose=1, timestamp=None):
        """
        Parameters
        ----------
        func: callable
            The function to decorate
        cachedir: string
            The path of the base directory to use as a data store
        ignore: list or None
            List of variable names to ignore.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading from cache
            numpy arrays. See numpy.load for the meaning of the
            arguments.
        compress : boolean, or integer
            Whether to zip the stored data on disk. If an integer is
            given, it should be between 1 and 9, and sets the amount
            of compression. Note that compressed arrays cannot be
            read by memmapping.
        verbose: int, optional
            Verbosity flag, controls the debug messages that are issued
            as functions are evaluated. The higher, the more verbose
        timestamp: float, optional
            The reference time from which times in tracing messages
            are reported.
        """
        Logger.__init__(self)
        self.mmap_mode = mmap_mode
        self.func = func
        if ignore is None:
            ignore = []
        self.ignore = ignore
        self._verbose = verbose
        self.cachedir = cachedir
        self.compress = compress
        if compress and self.mmap_mode is not None:
            warnings.warn('Compressed results cannot be memmapped',
                          stacklevel=2)
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = timestamp
        mkdirp(self.cachedir)
        try:
            functools.update_wrapper(self, func)
        except Exception:
            # Objects like ufunc don't like that (narrowed from a bare
            # ``except:`` so KeyboardInterrupt/SystemExit still propagate).
            pass
        if inspect.isfunction(func):
            doc = pydoc.TextDoc().document(func)
            # Remove blank line
            doc = doc.replace('\n', '\n\n', 1)
            # Strip backspace-overprints for compatibility with autodoc
            doc = re.sub('\x08.', '', doc)
        else:
            # Pydoc does a poor job on other objects
            doc = func.__doc__
        self.__doc__ = 'Memoized version of %s' % doc

    def _cached_call(self, args, kwargs):
        """Call wrapped function and cache result, or read cache if available.

        This function returns the wrapped function output and some metadata.

        Returns
        -------
        output: value or tuple
            what is returned by wrapped function
        argument_hash: string
            hash of function arguments
        metadata: dict
            some metadata about wrapped function call (see _persist_input())
        """
        # Compare the function code with the previous to see if the
        # function code has changed
        output_dir, argument_hash = self._get_output_dir(*args, **kwargs)
        metadata = None
        # FIXME: The statements below should be try/excepted
        if not (self._check_previous_func_code(stacklevel=4) and
                os.path.exists(output_dir)):
            if self._verbose > 10:
                _, name = get_func_name(self.func)
                self.warn('Computing func %s, argument hash %s in '
                          'directory %s'
                          % (name, argument_hash, output_dir))
            out, metadata = self.call(*args, **kwargs)
            if self.mmap_mode is not None:
                # Memmap the output at the first call to be consistent with
                # later calls
                out = _load_output(output_dir, _get_func_fullname(self.func),
                                   timestamp=self.timestamp,
                                   mmap_mode=self.mmap_mode,
                                   verbose=self._verbose)
        else:
            try:
                t0 = time.time()
                out = _load_output(output_dir, _get_func_fullname(self.func),
                                   timestamp=self.timestamp,
                                   metadata=metadata, mmap_mode=self.mmap_mode,
                                   verbose=self._verbose)
                if self._verbose > 4:
                    t = time.time() - t0
                    _, name = get_func_name(self.func)
                    msg = '%s cache loaded - %s' % (name, format_time(t))
                    print(max(0, (80 - len(msg))) * '_' + msg)
            except Exception:
                # XXX: Should use an exception logger
                self.warn('Exception while loading results for '
                          '(args=%s, kwargs=%s)\n %s' %
                          (args, kwargs, traceback.format_exc()))
                # Corrupt/unreadable cache entry: wipe it and recompute.
                shutil.rmtree(output_dir, ignore_errors=True)
                out, metadata = self.call(*args, **kwargs)
                argument_hash = None
        return (out, argument_hash, metadata)

    def call_and_shelve(self, *args, **kwargs):
        """Call wrapped function, cache result and return a reference.

        This method returns a reference to the cached result instead of the
        result itself. The reference object is small and pickeable, allowing
        to send or store it easily. Call .get() on reference object to get
        result.

        Returns
        -------
        cached_result: MemorizedResult or NotMemorizedResult
            reference to the value returned by the wrapped function. The
            class "NotMemorizedResult" is used when there is no cache
            activated (e.g. cachedir=None in Memory).
        """
        _, argument_hash, metadata = self._cached_call(args, kwargs)
        return MemorizedResult(self.cachedir, self.func, argument_hash,
                               metadata=metadata, verbose=self._verbose - 1,
                               timestamp=self.timestamp)

    def __call__(self, *args, **kwargs):
        return self._cached_call(args, kwargs)[0]

    def __reduce__(self):
        """ We don't store the timestamp when pickling, to avoid the hash
        depending from it.
        In addition, when unpickling, we run the __init__
        """
        return (self.__class__, (self.func, self.cachedir, self.ignore,
                self.mmap_mode, self.compress, self._verbose))

    def format_signature(self, *args, **kwargs):
        warnings.warn("MemorizedFunc.format_signature will be removed in a "
                      "future version of joblib.", DeprecationWarning)
        return format_signature(self.func, *args, **kwargs)

    def format_call(self, *args, **kwargs):
        warnings.warn("MemorizedFunc.format_call will be removed in a "
                      "future version of joblib.", DeprecationWarning)
        return format_call(self.func, args, kwargs)

    #-------------------------------------------------------------------------
    # Private interface
    #-------------------------------------------------------------------------

    def _get_argument_hash(self, *args, **kwargs):
        """Hash the (filtered) call arguments into a cache key string."""
        return hashing.hash(filter_args(self.func, self.ignore,
                                        args, kwargs),
                            coerce_mmap=(self.mmap_mode is not None))

    def _get_output_dir(self, *args, **kwargs):
        """ Return the directory in which are persisted the result
        of the function called with the given arguments.
        """
        argument_hash = self._get_argument_hash(*args, **kwargs)
        # Bug fix: the original code passed ``self.func`` here, which was
        # silently consumed as _get_func_dir's boolean ``mkdir`` flag (it
        # only worked because a function object is truthy). Behavior is
        # unchanged: the directory is still created by default.
        output_dir = os.path.join(self._get_func_dir(),
                                  argument_hash)
        return output_dir, argument_hash

    get_output_dir = _get_output_dir  # backward compatibility

    def _get_func_dir(self, mkdir=True):
        """ Get the directory corresponding to the cache for the
        function.
        """
        func_dir = _cache_key_to_dir(self.cachedir, self.func, None)
        if mkdir:
            mkdirp(func_dir)
        return func_dir

    def _hash_func(self):
        """Hash a function to key the online cache"""
        func_code_h = hash(getattr(self.func, '__code__', None))
        return id(self.func), hash(self.func), func_code_h

    def _write_func_code(self, filename, func_code, first_line):
        """ Write the function code and the filename to a file.
        """
        # We store the first line because the filename and the function
        # name is not always enough to identify a function: people
        # sometimes have several functions named the same way in a
        # file. This is bad practice, but joblib should be robust to bad
        # practice.
        func_code = '%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code)
        with open(filename, 'w') as out:
            out.write(func_code)
        # Also store in the in-memory store of function hashes
        is_named_callable = False
        if sys.version_info[0] > 2:
            is_named_callable = (hasattr(self.func, '__name__')
                                 and self.func.__name__ != '<lambda>')
        else:
            is_named_callable = (hasattr(self.func, 'func_name')
                                 and self.func.func_name != '<lambda>')
        if is_named_callable:
            # Don't do this for lambda functions or strange callable
            # objects, as it ends up being too fragile
            func_hash = self._hash_func()
            try:
                _FUNCTION_HASHES[self.func] = func_hash
            except TypeError:
                # Some callable are not hashable
                pass

    def _check_previous_func_code(self, stacklevel=2):
        """Return True when the function's stored source is up to date.

        stacklevel is the depth a which this function is called, to
        issue useful warnings to the user.
        """
        # First check if our function is in the in-memory store.
        # Using the in-memory store not only makes things faster, but it
        # also renders us robust to variations of the files when the
        # in-memory version of the code does not vary
        try:
            if self.func in _FUNCTION_HASHES:
                # We use as an identifier the id of the function and its
                # hash. This is more likely to falsely change than have hash
                # collisions, thus we are on the safe side.
                func_hash = self._hash_func()
                if func_hash == _FUNCTION_HASHES[self.func]:
                    return True
        except TypeError:
            # Some callables are not hashable
            pass
        # Here, we go through some effort to be robust to dynamically
        # changing code and collision. We cannot inspect.getsource
        # because it is not reliable when using IPython's magic "%run".
        func_code, source_file, first_line = get_func_code(self.func)
        func_dir = self._get_func_dir()
        func_code_file = os.path.join(func_dir, 'func_code.py')
        try:
            with open(func_code_file) as infile:
                old_func_code, old_first_line = \
                    extract_first_line(infile.read())
        except IOError:
            # No previous record: store the current source and report
            # "changed" so the value gets (re)computed.
            self._write_func_code(func_code_file, func_code, first_line)
            return False
        if old_func_code == func_code:
            return True
        # We have differing code, is this because we are referring to
        # different functions, or because the function we are referring to has
        # changed?
        _, func_name = get_func_name(self.func, resolv_alias=False,
                                     win_characters=False)
        if old_first_line == first_line == -1 or func_name == '<lambda>':
            if not first_line == -1:
                func_description = '%s (%s:%i)' % (func_name,
                                                   source_file, first_line)
            else:
                func_description = func_name
            warnings.warn(JobLibCollisionWarning(
                "Cannot detect name collisions for function '%s'"
                % func_description), stacklevel=stacklevel)
        # Fetch the code at the old location and compare it. If it is the
        # same than the code store, we have a collision: the code in the
        # file has not changed, but the name we have is pointing to a new
        # code block.
        if not old_first_line == first_line and source_file is not None:
            possible_collision = False
            if os.path.exists(source_file):
                _, func_name = get_func_name(self.func, resolv_alias=False)
                num_lines = len(func_code.split('\n'))
                with open(source_file) as f:
                    on_disk_func_code = f.readlines()[
                        old_first_line - 1:old_first_line - 1 + num_lines - 1]
                on_disk_func_code = ''.join(on_disk_func_code)
                possible_collision = (on_disk_func_code.rstrip()
                                      == old_func_code.rstrip())
            else:
                possible_collision = source_file.startswith('<doctest ')
            if possible_collision:
                warnings.warn(JobLibCollisionWarning(
                    'Possible name collisions between functions '
                    "'%s' (%s:%i) and '%s' (%s:%i)" %
                    (func_name, source_file, old_first_line,
                     func_name, source_file, first_line)),
                    stacklevel=stacklevel)
        # The function has changed, wipe the cache directory.
        # XXX: Should be using warnings, and giving stacklevel
        if self._verbose > 10:
            _, func_name = get_func_name(self.func, resolv_alias=False)
            self.warn("Function %s (stored in %s) has changed." %
                      (func_name, func_dir))
        self.clear(warn=True)
        return False

    def clear(self, warn=True):
        """ Empty the function's cache.
        """
        func_dir = self._get_func_dir(mkdir=False)
        if self._verbose > 0 and warn:
            self.warn("Clearing cache %s" % func_dir)
        if os.path.exists(func_dir):
            shutil.rmtree(func_dir, ignore_errors=True)
        mkdirp(func_dir)
        # Re-seed the directory with the current function source so the
        # next call sees an up-to-date record.
        func_code, _, first_line = get_func_code(self.func)
        func_code_file = os.path.join(func_dir, 'func_code.py')
        self._write_func_code(func_code_file, func_code, first_line)

    def call(self, *args, **kwargs):
        """ Force the execution of the function with the given arguments and
        persist the output values.
        """
        start_time = time.time()
        output_dir, _ = self._get_output_dir(*args, **kwargs)
        if self._verbose > 0:
            print(format_call(self.func, args, kwargs))
        output = self.func(*args, **kwargs)
        self._persist_output(output, output_dir)
        duration = time.time() - start_time
        metadata = self._persist_input(output_dir, duration, args, kwargs)
        if self._verbose > 0:
            _, name = get_func_name(self.func)
            msg = '%s - %s' % (name, format_time(duration))
            print(max(0, (80 - len(msg))) * '_' + msg)
        return output, metadata

    def _persist_output(self, output, dir):
        """ Persist the given output tuple in the directory.
        """
        try:
            mkdirp(dir)
            filename = os.path.join(dir, 'output.pkl')
            numpy_pickle.dump(output, filename, compress=self.compress)
            if self._verbose > 10:
                print('Persisting in %s' % dir)
        except OSError:
            # Race condition in the creation of the directory
            pass

    def _persist_input(self, output_dir, duration, args, kwargs,
                       this_duration_limit=0.5):
        """ Save a small summary of the call using json format in the
        output directory.

        output_dir: string
            directory where to write metadata.
        duration: float
            time taken by hashing input arguments, calling the wrapped
            function and persisting its output.
        args, kwargs: list and dict
            input arguments for wrapped function
        this_duration_limit: float
            Max execution time for this function before issuing a warning.
        """
        start_time = time.time()
        argument_dict = filter_args(self.func, self.ignore,
                                    args, kwargs)
        input_repr = dict((k, repr(v)) for k, v in argument_dict.items())
        # This can fail due to race-conditions with multiple
        # concurrent joblibs removing the file or the directory
        metadata = {"duration": duration, "input_args": input_repr}
        try:
            mkdirp(output_dir)
            with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
                json.dump(metadata, f)
        except Exception:
            # Best-effort persistence; narrowed from a bare ``except:``.
            pass
        this_duration = time.time() - start_time
        if this_duration > this_duration_limit:
            # This persistence should be fast. It will not be if repr() takes
            # time and its output is large, because json.dump will have to
            # write a large file. This should not be an issue with numpy arrays
            # for which repr() always output a short representation, but can
            # be with complex dictionaries. Fixing the problem should be a
            # matter of replacing repr() above by something smarter.
            warnings.warn("Persisting input arguments took %.2fs to run.\n"
                          "If this happens often in your code, it can cause "
                          "performance problems \n"
                          "(results will be correct in all cases). \n"
                          "The reason for this is probably some large input "
                          "arguments for a wrapped\n"
                          " function (e.g. large strings).\n"
                          "THIS IS A JOBLIB ISSUE. If you can, kindly provide "
                          "the joblib's team with an\n"
                          " example so that they can fix the problem."
                          % this_duration, stacklevel=5)
        return metadata

    def load_output(self, output_dir):
        """ Read the results of a previous calculation from the directory
        it was cached in.
        """
        warnings.warn("MemorizedFunc.load_output is deprecated and will be "
                      "removed in a future version\n"
                      "of joblib. A MemorizedResult provides similar features",
                      DeprecationWarning)
        # No metadata available here.
        return _load_output(output_dir, _get_func_fullname(self.func),
                            timestamp=self.timestamp,
                            mmap_mode=self.mmap_mode, verbose=self._verbose)

    # XXX: Need a method to check if results are available.

    #-------------------------------------------------------------------------
    # Private `object` interface
    #-------------------------------------------------------------------------

    def __repr__(self):
        return '%s(func=%s, cachedir=%s)' % (
            self.__class__.__name__,
            self.func,
            repr(self.cachedir),
            )
###############################################################################
# class `Memory`
###############################################################################
class Memory(Logger):
    """ A context object for caching a function's return value each time it
    is called with the same input arguments.
    All values are cached on the filesystem, in a deep directory
    structure.
    see :ref:`memory_reference`
    """
    #-------------------------------------------------------------------------
    # Public interface
    #-------------------------------------------------------------------------
    def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1):
        """
        Parameters
        ----------
        cachedir: string or None
            The path of the base directory to use as a data store
            or None. If None is given, no caching is done and
            the Memory object is completely transparent.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading from cache
            numpy arrays. See numpy.load for the meaning of the
            arguments.
        compress: boolean, or integer
            Whether to zip the stored data on disk. If an integer is
            given, it should be between 1 and 9, and sets the amount
            of compression. Note that compressed arrays cannot be
            read by memmapping.
        verbose: int, optional
            Verbosity flag, controls the debug messages that are issued
            as functions are evaluated.
        """
        # XXX: Bad explanation of the None value of cachedir
        Logger.__init__(self)
        self._verbose = verbose
        self.mmap_mode = mmap_mode
        self.timestamp = time.time()
        self.compress = compress
        if compress and mmap_mode is not None:
            warnings.warn('Compressed results cannot be memmapped',
                          stacklevel=2)
        if cachedir is None:
            # Transparent mode: no caching at all.
            self.cachedir = None
        else:
            # All cache data lives under a 'joblib' subdirectory
            # (stripped again in __reduce__ below).
            self.cachedir = os.path.join(cachedir, 'joblib')
            mkdirp(self.cachedir)
    def cache(self, func=None, ignore=None, verbose=None,
              mmap_mode=False):
        """ Decorates the given function func to only compute its return
        value for input arguments not cached on disk.
        Parameters
        ----------
        func: callable, optional
            The function to be decorated
        ignore: list of strings
            A list of arguments name to ignore in the hashing
        verbose: integer, optional
            The verbosity mode of the function. By default that
            of the memory object is used.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading from cache
            numpy arrays. See numpy.load for the meaning of the
            arguments. By default that of the memory object is used.
        Returns
        -------
        decorated_func: MemorizedFunc object
            The returned object is a MemorizedFunc object, that is
            callable (behaves like a function), but offers extra
            methods for cache lookup and management. See the
            documentation for :class:`joblib.memory.MemorizedFunc`.
        """
        if func is None:
            # Partial application, to be able to specify extra keyword
            # arguments in decorators
            return functools.partial(self.cache, ignore=ignore,
                                     verbose=verbose, mmap_mode=mmap_mode)
        if self.cachedir is None:
            return NotMemorizedFunc(func)
        if verbose is None:
            verbose = self._verbose
        # NOTE: False (not None) is the "unset" sentinel for mmap_mode,
        # because None is itself a meaningful value for numpy.load.
        if mmap_mode is False:
            mmap_mode = self.mmap_mode
        if isinstance(func, MemorizedFunc):
            # Avoid double-wrapping an already-decorated function.
            func = func.func
        return MemorizedFunc(func, cachedir=self.cachedir,
                             mmap_mode=mmap_mode,
                             ignore=ignore,
                             compress=self.compress,
                             verbose=verbose,
                             timestamp=self.timestamp)
    def clear(self, warn=True):
        """ Erase the complete cache directory.
        """
        if warn:
            self.warn('Flushing completely the cache')
        rm_subdirs(self.cachedir)
    def eval(self, func, *args, **kwargs):
        """ Eval function func with arguments `*args` and `**kwargs`,
        in the context of the memory.
        This method works similarly to the builtin `apply`, except
        that the function is called only if the cache is not
        up to date.
        """
        if self.cachedir is None:
            # Caching disabled: call straight through.
            return func(*args, **kwargs)
        return self.cache(func)(*args, **kwargs)
    #-------------------------------------------------------------------------
    # Private `object` interface
    #-------------------------------------------------------------------------
    def __repr__(self):
        return '%s(cachedir=%s)' % (
            self.__class__.__name__,
            repr(self.cachedir),
        )
    def __reduce__(self):
        """ We don't store the timestamp when pickling, to avoid the hash
        depending from it.
        In addition, when unpickling, we run the __init__
        """
        # We need to remove 'joblib' from the end of cachedir
        # (7 == len('joblib') plus the path separator added in __init__).
        cachedir = self.cachedir[:-7] if self.cachedir is not None else None
        return (self.__class__, (cachedir,
                                 self.mmap_mode, self.compress, self._verbose))
|
bsd-3-clause
|
Ragora/T2-ScriptDump
|
scripts/playerdb.py
|
1
|
2392
|
"""
Player datablock generator, written to test an experimental super-2046 DB count
patch.
"""
import sys
import math
class PseudoNumber:
    """Represents a non-negative integer in an arbitrary digit alphabet
    (e.g. 'A'..'Z'), used to generate unique datablock name suffixes.

    NOTE(review): ``self._decimal_value / digit_count`` in convert() looks
    like it relies on Python 2 integer division; under Python 3 it becomes
    float division before math.ceil, which can change ``start_power`` for
    some inputs — confirm the intended interpreter before porting.
    """
    # Encoded string form of the number (rebuilt by convert()).
    _value = None
    # The alphabet of digit characters, least significant first.
    _digits = None
    # The plain integer value being represented.
    _decimal_value = None
    # NOTE(review): _converted appears unused by the visible code.
    _converted = None
    def __init__(self, digits=None, value=None):
        self._digits = digits
        self._decimal_value = int(value)
        self.convert()
    def convert(self):
        """Rebuild and return the encoded string for _decimal_value."""
        self._value = ''
        if (self._decimal_value == 0):
            # Zero is just the first (lowest) digit.
            self._value = self._digits[0]
            return self._value
        digit_count = len(self._digits) - 1
        # Highest power of the base that might be needed.
        start_power = int(math.ceil(self._decimal_value / digit_count))
        temporary = int(self._decimal_value)
        for current_power in reversed(range(start_power + 1)):
            # Scan digits from the highest value downward at this power.
            for current_index, current_digit in enumerate(reversed(self._digits)):
                actual_index = digit_count - current_index
                actual_value = actual_index * pow(digit_count + 1, current_power)
                if (actual_value <= temporary and actual_value != 0):
                    temporary -= actual_value
                    self._value += self._digits[actual_index % (digit_count + 1)]
                    if (temporary <= 0):
                        # Pad the remaining places with the zero digit.
                        for power in range(current_power):
                            self._value += self._digits[0]
                        return self._value
        return self._value
    def __repr__(self):
        # NOTE: repr re-runs the conversion; str() returns the cached form.
        return self.convert()
    def __str__(self):
        return self._value
    def __int__(self):
        return self._decimal_value
    def __add__(self, other):
        # Addition with a plain int yields a new PseudoNumber in the
        # same alphabet; the original instance is left unchanged.
        return PseudoNumber(self._digits, self._decimal_value + other)
class Application:
    """Command-line driver: writes ``<file count>`` .cs files, each holding
    ``<count> / <file count>`` PlayerData datablock definitions whose names
    carry unique alphabetic suffixes generated by PseudoNumber.
    """

    def main(self):
        """Parse sys.argv and emit the generated datablock script files."""
        if (len(sys.argv) != 3):
            print("Usage: %s <file count> <count>" % sys.argv[0])
            return
        datablock_count = int(sys.argv[2])
        file_count = int(sys.argv[1])
        # Bug fix: use floor division. Plain '/' yields a float on
        # Python 3, which makes range() and the '%u' writes below fail;
        # on Python 2 ints, '//' behaves identically to the old '/'.
        datablock_iterations = datablock_count // file_count
        current_digit = PseudoNumber('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 0)
        for file_iteration in range(file_count):
            with open("out%u.cs" % file_iteration, "w") as handle:
                handle.write("// Total Count: %u\n" % datablock_iterations)
                for current_datablock_iteration in range(datablock_iterations):
                    handle.write("// Datablock #%u\n" % current_datablock_iteration)
                    handle.write("datablock PlayerData(%sMaleBiodermArmor) : LightMaleHumanArmor\n" % str(current_digit))
                    handle.write("{\n")
                    handle.write("\tshapeFile = \"bioderm_light.dts\";\n")
                    handle.write("};\n\n")
                    current_digit += 1
                # Chain-load the next generated file from this one.
                handle.write("exec(\"scripts/out%u.cs\");" % (file_iteration + 1))
if __name__ == "__main__":
Application().main()
|
mit
|
intel-iot-devkit/upm
|
examples/python/mcp2515.py
|
6
|
2931
|
#!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_mcp2515 as MCP2515
def main():
    """Loopback self-test of an MCP2515 CAN controller via the UPM driver.

    Puts the device into loopback mode, loads and transmits one 8-byte
    packet, then verifies it arrives in receive buffer 0.  The hardware
    call sequence is order-sensitive and left exactly as-is.
    """
    # Instantiate a MCP2515 on SPI bus 0 using a hw CS pin (-1).
    sensor = MCP2515.MCP2515(0, -1)
    ## Exit handlers ##
    # This function stops python from printing a stacktrace when you
    # hit control-C
    def SIGINTHandler(signum, frame):
        raise SystemExit
    # This function lets you run code on exit, including functions from sensor
    def exitHandler():
        print("Exiting")
        sys.exit(0)
    # Register exit handlers
    atexit.register(exitHandler)
    signal.signal(signal.SIGINT, SIGINTHandler)
    print("Setting loopback mode...")
    # set the mode to loopback mode. In loopback mode, transmitted
    # packets are sent directly to an appropriate receive buffer
    # without actually going out onto the CAN bus.
    sensor.setOpmode(MCP2515.MCP2515_OPMODE_LOOPBACK);
    # lets build up a packet and try loading it (8 bytes max).
    myPayload = "01234567";
    print("Loading a packet of 8 numbers (0-7) into a TX buffer...")
    sensor.loadTXBuffer(MCP2515.MCP2515_TX_BUFFER0, 0, False, False, myPayload);
    # now lets try to transmit it
    print("Transmitting packet...")
    sensor.transmitBuffer(MCP2515.MCP2515_TX_BUFFER0, True);
    print("Transmit successful")
    # There should now be a packet waiting for us in RXB0
    if (sensor.rxStatusMsgs() == MCP2515.MCP2515_RXMSG_RXB0):
        print("Packet received in RXB0, decoding...")
        # now lets retrieve and print it
        sensor.getRXMsg(MCP2515.MCP2515_RX_BUFFER0);
        sensor.printMsg();
    else:
        print("No packet found, how strange.")
    print("Exiting...")
if __name__ == '__main__':
    main()
|
mit
|
jerluc/wmii
|
alternative_wmiircs/python/pyxp/client.py
|
9
|
9838
|
# Copyright (C) 2009 Kris Maglione
import operator
import os
import re
import sys
from threading import *
import traceback
import pyxp
from pyxp import fcall, fields
from pyxp.mux import Mux
from pyxp.types import *
# Default 9P namespace directory: honor $NAMESPACE when set, otherwise
# derive /tmp/ns.$USER.$DISPLAY (with a trailing '.0' stripped from DISPLAY).
namespace = None
if os.environ.get('NAMESPACE', None):
    namespace = os.environ['NAMESPACE']
else:
    try:
        namespace = '/tmp/ns.%s.%s' % (
            os.environ['USER'],
            re.sub(r'\.0$', '', os.environ['DISPLAY']))
    except KeyError:
        # Fix: 'namespace' used to be left undefined here, so the
        # assignment below raised NameError whenever USER or DISPLAY
        # was unset; it now falls back to None.
        pass
NAMESPACE = namespace
# Plan 9 / 9P style open-mode flag constants; names follow the Plan 9
# open(2) conventions.  The low two bits select the access mode, the
# high bits are modifiers.
OREAD = 0x00     # open for reading
OWRITE = 0x01    # open for writing
ORDWR = 0x02     # open for reading and writing
OEXEC = 0x03     # open for execution
OEXCL = 0x04
OTRUNC = 0x10
OREXEC = 0x20
ORCLOSE = 0x40
OAPPEND = 0x80
# Fid number reserved for the connection's root (see Client).
ROOT_FID = 0
class ProtocolException(Exception):
    """Raised on 9P protocol violations (version mismatch, or a reply whose
    message type does not match its request — see Client._dorpc)."""
    pass
class RPCError(Exception):
    """Raised when the server answers an RPC with an Rerror message."""
    pass
class Client(object):
ROOT_FID = 0
@staticmethod
def respond(callback, data, exc=None, tb=None):
if hasattr(callback, 'func_code'):
callback(*(data, exc, tb)[0:callback.func_code.co_argcount])
elif callable(callback):
callback(data)
def __enter__(self):
return self
def __exit__(self, *args):
self._cleanup()
def __init__(self, conn=None, namespace=None, root=None):
if not conn and namespace:
conn = 'unix!%s/%s' % (NAMESPACE, namespace)
try:
self.lastfid = ROOT_FID
self.fids = set()
self.lock = RLock()
def process(data):
return fcall.Fcall.unmarshall(data)[1]
self.mux = Mux(conn, process, maxtag=256)
resp = self._dorpc(fcall.Tversion(version=pyxp.VERSION, msize=65535))
if resp.version != pyxp.VERSION:
raise ProtocolException, "Can't speak 9P version '%s'" % resp.version
self.msize = resp.msize
self._dorpc(fcall.Tattach(fid=ROOT_FID, afid=fcall.NO_FID,
uname=os.environ['USER'], aname=''))
if root:
path = self._splitpath(root)
resp = self._dorpc(fcall.Twalk(fid=ROOT_FID,
newfid=ROOT_FID,
wname=path))
except Exception:
traceback.print_exc(sys.stdout)
if getattr(self, 'mux', None):
self.mux.fd.close()
raise
def _cleanup(self):
try:
for f in self.files:
f.close()
finally:
self.mux.fd.close()
self.mux = None
def _dorpc(self, req, callback=None, error=None):
def doresp(resp):
if isinstance(resp, fcall.Rerror):
raise RPCError, "%s[%d] RPC returned error: %s" % (
req.__class__.__name__, resp.tag, resp.ename)
if req.type != resp.type ^ 1:
raise ProtocolException, "Missmatched RPC message types: %s => %s" % (
req.__class__.__name__, resp.__class__.__name__)
return resp
def next(mux, resp):
try:
res = doresp(resp)
except Exception, e:
self.respond(error or callback, None, e, None)
else:
self.respond(callback, res)
if not callback:
return doresp(self.mux.rpc(req))
self.mux.rpc(req, next)
def _splitpath(self, path):
if isinstance(path, list):
return path
return [v for v in path.split('/') if v != '']
def _getfid(self):
with self.lock:
if self.fids:
return self.fids.pop()
self.lastfid += 1
return self.lastfid
def _putfid(self, fid):
with self.lock:
self.fids.add(fid)
def _aclunk(self, fid, callback=None):
def next(resp, exc, tb):
if resp:
self._putfid(fid)
self.respond(callback, resp, exc, tb)
self._dorpc(fcall.Tclunk(fid=fid), next)
def _clunk(self, fid):
try:
self._dorpc(fcall.Tclunk(fid=fid))
finally:
self._putfid(fid)
def _walk(self, path):
fid = self._getfid()
ofid = ROOT_FID
while True:
self._dorpc(fcall.Twalk(fid=ofid, newfid=fid,
wname=path[0:fcall.MAX_WELEM]))
path = path[fcall.MAX_WELEM:]
ofid = fid
if len(path) == 0:
break
@apply
class Res:
def __enter__(res):
return fid
def __exit__(res, exc_type, exc_value, traceback):
if exc_type:
self._clunk(fid)
return Res
_file = property(lambda self: File)
def _open(self, path, mode, fcall, origpath=None):
resp = None
with self._walk(path) as nfid:
fid = nfid
fcall.fid = fid
resp = self._dorpc(fcall)
def cleanup():
self._aclunk(fid)
file = self._file(self, origpath or '/'.join(path), resp, fid, mode, cleanup)
return file
def open(self, path, mode=OREAD):
path = self._splitpath(path)
return self._open(path, mode, fcall.Topen(mode=mode))
def create(self, path, mode=OREAD, perm=0):
path = self._splitpath(path)
name = path.pop()
return self._open(path, mode, fcall.Tcreate(mode=mode, name=name, perm=perm),
origpath='/'.join(path + [name]))
def remove(self, path):
path = self._splitpath(path)
with self._walk(path) as fid:
self._dorpc(fcall.Tremove(fid=fid))
def stat(self, path):
path = self._splitpath(path)
try:
with self._walk(path) as fid:
resp = self._dorpc(fcall.Tstat(fid= fid))
st = resp.stat
self._clunk(fid)
return st
except RPCError:
return None
def read(self, path, *args, **kwargs):
with self.open(path) as f:
return f.read(*args, **kwargs)
def readlines(self, path, *args, **kwargs):
with self.open(path) as f:
for l in f.readlines(*args, **kwargs):
yield l
def readdir(self, path, *args, **kwargs):
with self.open(path) as f:
for s in f.readdir(*args, **kwargs):
yield s
def write(self, path, *args, **kwargs):
with self.open(path, OWRITE) as f:
return f.write(*args, **kwargs)
class File(object):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __init__(self, client, path, fcall, fid, mode, cleanup):
self.lock = RLock()
self.client = client
self.path = path
self.fid = fid
self._cleanup = cleanup
self.mode = mode
self.iounit = fcall.iounit
self.qid = fcall.qid
self.closed = False
self.offset = 0
def __del__(self):
if not self.closed:
self._cleanup()
def _dorpc(self, fcall, async=None, error=None):
if hasattr(fcall, 'fid'):
fcall.fid = self.fid
return self.client._dorpc(fcall, async, error)
def stat(self):
resp = self._dorpc(fcall.Tstat())
return resp.stat
def read(self, count=None, offset=None, buf=''):
if count is None:
count = self.iounit
res = []
with self.lock:
offs = self.offset
if offset is not None:
offs = offset
while count > 0:
n = min(count, self.iounit)
count -= n
resp = self._dorpc(fcall.Tread(offset=offs, count=n))
data = resp.data
offs += len(data)
res.append(data)
if len(data) < n:
break
if offset is None:
self.offset = offs
return ''.join(res)
def readlines(self):
last = None
while True:
data = self.read()
if not data:
break
lines = data.split('\n')
if last:
lines[0] = last + lines[0]
last = None
for i in range(0, len(lines) - 1):
yield lines[i]
last = lines[-1]
if last:
yield last
def write(self, data, offset=None):
if offset is None:
offset = self.offset
off = 0
with self.lock:
offs = self.offset
if offset is not None:
offs = offset
while off < len(data):
n = min(len(data), self.iounit)
resp = self._dorpc(fcall.Twrite(offset=offs,
data=data[off:off+n]))
off += resp.count
offs += resp.count
if resp.count < n:
break
if offset is None:
self.offset = offs
return off
def readdir(self):
if not self.qid.type & Qid.QTDIR:
raise Exception, "Can only call readdir on a directory"
off = 0
while True:
data = self.read(self.iounit, off)
if not data:
break
off += len(data)
for s in Stat.unmarshall_list(data):
yield s
def close(self):
assert not self.closed
self.closed = True
try:
self._cleanup()
except:
pass
self.tg = None
self.fid = None
self.client = None
self.qid = None
def remove(self):
try:
self._dorpc(fcall.Tremove())
finally:
self.close()
# vim:se sts=4 sw=4 et:
|
mit
|
dumrelu/Snake
|
snake GPIO.py
|
1
|
6425
|
#!/usr/bin/python
import time
import os
from random import randint
# NOTE(review): ``GPIO`` is used throughout but never imported in this
# file — presumably ``import RPi.GPIO as GPIO`` is missing at the top;
# as written this raises NameError at import time.  Confirm upstream.
GPIO.setmode(GPIO.BCM);
# Button and LED BCM pin assignments.
BUTON_SUS=17;BUTON_JOS=22;BUTON_STANGA=27;BUTON_DREAPTA=4;
LED0=23;LED1=24;LED2=25;
GPIO.setup(BUTON_SUS,GPIO.IN);
GPIO.setup(BUTON_JOS,GPIO.IN);
GPIO.setup(BUTON_STANGA,GPIO.IN);
GPIO.setup(BUTON_DREAPTA,GPIO.IN);
GPIO.setup(LED0,GPIO.OUT);
GPIO.setup(LED1,GPIO.OUT);
GPIO.setup(LED2,GPIO.OUT);
# Board dimensions.
lungimeTabla = 10;
latimeTabla = 10;
# Total number of lives.
numarVieti = 7;
# Score (number of food items eaten).
scor = 0;
# Board coordinates [row, col] where the food currently is.
mancare=[0,0];
# Directions the snake can move: [row delta, column delta].
UP = [-1,0];
DOWN = [1,0];
LEFT = [0,-1];
RIGHT = [0,1];
class Sarpe:
    """The snake: head coordinates, body-segment list and direction.

    Coordinates are [row, col]; directions are the unit [d_row, d_col]
    vectors UP/DOWN/LEFT/RIGHT defined at module level.
    """
    def __init__(self):
        # Head starts at the middle of the board.
        self.coordonate = [lungimeTabla/2,latimeTabla/2];
        # Body initially has 2 segments (3 cells total with the head),
        # extending to the right of the head.
        self.corp = [[self.coordonate[0], self.coordonate[1]+1], [self.coordonate[0], self.coordonate[1]+2]];
        # Initial movement direction.
        self.directie = LEFT;
        # Whether the snake should grow on its next move.
        self.shouldGrown = False;

    def grow(self):
        'Mark the snake to grow by one segment on its next move.'
        self.shouldGrown = True;

    def reset(self):
        'Return the snake to its initial state.'
        self.coordonate = [lungimeTabla/2,latimeTabla/2];
        self.corp = [[self.coordonate[0], self.coordonate[1]+1], [self.coordonate[0], self.coordonate[1]+2]];
        self.directie = LEFT;
        # Fix: the original assigned the unused attribute ``self.grown``,
        # so a growth pending at the moment of death leaked into the next
        # life; clear the flag that move() actually consults.
        self.shouldGrown = False;

    def setDirectie(self, directie):
        'Set the direction the snake will move in next.'
        # A 180-degree reversal is not allowed.  Fix: the original
        # compared index 0 twice; the second comparison must use index 1.
        if self.directie[0]!=-directie[0] or self.directie[1]!=-directie[1]:
            self.directie=directie;

    def move(self):
        'Move one cell in the current direction; returns False on death.'
        # New head position.
        coordonateNoi = [self.coordonate[0]+self.directie[0],self.coordonate[1]+self.directie[1]];
        # Death: left the board?
        if coordonateNoi[0] < 0 or coordonateNoi[0] >= lungimeTabla or coordonateNoi[1] < 0 or coordonateNoi[1] >= latimeTabla:
            return False;
        # Death: bit its own body?
        if coordonateNoi in self.corp:
            return False;
        # Remember the old tail in case a new segment must be appended.
        coordonateCoadaVeche = self.corp[len(self.corp)-1];
        # Shift body segments towards the tail; old head becomes segment 0.
        for index in range(len(self.corp)-1, 0, -1):
            self.corp[index] = self.corp[index-1];
        self.corp[0] = self.coordonate;
        self.coordonate = coordonateNoi;
        # Grow if food was eaten on the previous move.
        if self.shouldGrown:
            self.corp.append(coordonateCoadaVeche);
            self.shouldGrown = False;
        return True;
# The single global snake instance.
sarpe = Sarpe();
def clearScreen():
    'Clear the current contents of the terminal.'
    os.system('clear');
def genereazaMancare():
    'Randomly place food on the board so it does not overlap the snake.'
    # Mutates the module-level ``mancare`` list in place (item assignment,
    # so no ``global`` statement is needed).
    while True:
        mancare[0] = randint(0,lungimeTabla-1);
        mancare[1] = randint(0,latimeTabla-1);
        if mancare != sarpe.coordonate and mancare not in sarpe.corp:
            return;
def deseneazaTabla():
    'Render the game board to the terminal (Python 2 print statements).'
    # Wipe whatever is currently on screen.
    clearScreen();
    # Zero-filled matrix of lungimeTabla x latimeTabla cells.
    matrice = [[0 for x in xrange(latimeTabla)] for x in xrange(lungimeTabla)];
    'adauga sarpele in matrice'
    # Snake head.
    matrice[sarpe.coordonate[0]][sarpe.coordonate[1]] = 'O';
    # Snake body segments.
    for parteCorp in sarpe.corp:
        matrice[parteCorp[0]][parteCorp[1]] = '*';
    # Place the food.
    matrice[mancare[0]][mancare[1]] = '@';
    # Add a border around the matrix.
    matrice.insert(0, ['-' for x in xrange(latimeTabla+2)]);
    matrice.append(['-' for x in xrange(latimeTabla+2)]);
    for index in range(1, len(matrice)-1):
        matrice[index].insert(0, '|');
        matrice[index].append('|');
    # Print the matrix row by row.
    for rand in matrice:
        for coloana in rand:
            if coloana != 0:
                print coloana,
            else:
                print ' ',
        print
    # Show lives and score below the board.
    print 'Vieti:',numarVieti,'Scor:',scor
def afiseazaVieti():
    """Display the life count in binary on the LEDs.

    LED0 is the least-significant bit, LED2 the most-significant.
    """
    bits = "{0:03b}".format(numarVieti)
    for led, bit in ((LED2, bits[0]), (LED1, bits[1]), (LED0, bits[2])):
        GPIO.output(led, bool(int(bit)))
def start():
    'Start the game after a countdown from 3.'
    # Generate the first food item and draw the initial state.
    genereazaMancare();
    deseneazaTabla();
    afiseazaVieti();
    # Countdown from 3 to 1.
    for i in range(3, 0,-1):
        print "Jocul incepe in:",i;
        time.sleep(1);
        deseneazaTabla();
    else:
        # NOTE(review): this for/else always executes (the loop has no
        # break), so it is equivalent to straight-line code after the loop.
        deseneazaTabla();
        print "GO!"
    sarpe.reset();
    time.sleep(0.5);
def puneDirectie():
    """Poll the buttons and set the snake's direction accordingly.

    The first pressed button (in SUS/JOS/STANGA/DREAPTA order) wins,
    mirroring the original if/elif chain.
    """
    butoane = ((BUTON_SUS, UP), (BUTON_JOS, DOWN),
               (BUTON_STANGA, LEFT), (BUTON_DREAPTA, RIGHT))
    for buton, directie in butoane:
        if GPIO.input(buton):
            sarpe.setDirectie(directie)
            break
if __name__=="__main__":
    # Start the game (countdown + initial board).
    start();
    while numarVieti!=0:
        puneDirectie();
        if sarpe.move()==False:
            numarVieti-=1;
            # NOTE(review): the LEDs are only refreshed inside start(),
            # so losing the last life never updates them — confirm intent.
            if numarVieti!=0:
                print "Ai murit!"
                time.sleep(1);
                # Restart the round.
                start();
        # If the snake's head reached the food, eat it.
        if mancare == sarpe.coordonate:
            sarpe.grow();
            genereazaMancare();
            scor += 1;
        deseneazaTabla();
        time.sleep(0.15);
    print "Game over! Scorul tau final:", scor;
|
mit
|
KaranToor/MA450
|
google-cloud-sdk/.install/.backup/lib/third_party/apitools/gen/extended_descriptor.py
|
25
|
21730
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extended protorpc descriptors.
This takes existing protorpc Descriptor classes and adds extra
properties not directly supported in proto itself, notably field and
message descriptions. We need this in order to generate protorpc
message files with comments.
Note that for most of these classes, we can't simply wrap the existing
message, since we need to change the type of the subfields. We could
have a "plain" descriptor attached, but that seems like unnecessary
bookkeeping. Where possible, we purposely reuse existing tag numbers;
for new fields, we start numbering at 100.
"""
import abc
import operator
import textwrap
import six
from apitools.base.protorpclite import descriptor as protorpc_descriptor
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
import apitools.base.py as apitools_base
class ExtendedEnumValueDescriptor(messages.Message):
    """Enum value descriptor with additional fields.

    Fields:
      name: Name of enumeration value.
      number: Number of enumeration value.
      description: Description of this enum value.
    """
    name = messages.StringField(1)
    number = messages.IntegerField(2, variant=messages.Variant.INT32)
    # Extension fields start at tag 100 to stay clear of protorpc's own
    # descriptor tag numbers (see the module docstring).
    description = messages.StringField(100)
class ExtendedEnumDescriptor(messages.Message):
    """Enum class descriptor with additional fields.

    Fields:
      name: Name of Enum without any qualification.
      values: Values defined by Enum class.
      description: Description of this enum class.
      full_name: Fully qualified name of this enum class.
      enum_mappings: Mappings from python to JSON names for enum values.
    """
    class JsonEnumMapping(messages.Message):
        """Mapping from a python name to the wire name for an enum."""
        python_name = messages.StringField(1)
        json_name = messages.StringField(2)
    name = messages.StringField(1)
    values = messages.MessageField(
        ExtendedEnumValueDescriptor, 2, repeated=True)
    # Extension fields use tags >= 100 (see module docstring).
    description = messages.StringField(100)
    full_name = messages.StringField(101)
    enum_mappings = messages.MessageField(
        'JsonEnumMapping', 102, repeated=True)
class ExtendedFieldDescriptor(messages.Message):
    """Field descriptor with additional fields.

    Fields:
      field_descriptor: The underlying field descriptor.
      name: The name of this field.
      description: Description of this field.
    """
    # Wraps (rather than extends) the plain protorpc descriptor, since the
    # subfield types differ — see the module docstring.
    field_descriptor = messages.MessageField(
        protorpc_descriptor.FieldDescriptor, 100)
    # We duplicate the names for easier bookkeeping.
    name = messages.StringField(101)
    description = messages.StringField(102)
class ExtendedMessageDescriptor(messages.Message):
    """Message descriptor with additional fields.

    Fields:
      name: Name of Message without any qualification.
      fields: Fields defined for message.
      message_types: Nested Message classes defined on message.
      enum_types: Nested Enum classes defined on message.
      description: Description of this message.
      full_name: Full qualified name of this message.
      decorators: Decorators to include in the definition when printing.
          Printed in the given order from top to bottom (so the last entry
          is the innermost decorator).
      alias_for: This type is just an alias for the named type.
      field_mappings: Mappings from python to json field names.
    """
    class JsonFieldMapping(messages.Message):
        """Mapping from a python name to the wire name for a field."""
        python_name = messages.StringField(1)
        json_name = messages.StringField(2)
    name = messages.StringField(1)
    fields = messages.MessageField(ExtendedFieldDescriptor, 2, repeated=True)
    # Self-referential type, hence the string form of the type name.
    message_types = messages.MessageField(
        'extended_descriptor.ExtendedMessageDescriptor', 3, repeated=True)
    enum_types = messages.MessageField(
        ExtendedEnumDescriptor, 4, repeated=True)
    # Extension fields use tags >= 100 (see module docstring).
    description = messages.StringField(100)
    full_name = messages.StringField(101)
    decorators = messages.StringField(102, repeated=True)
    alias_for = messages.StringField(103)
    field_mappings = messages.MessageField(
        'JsonFieldMapping', 104, repeated=True)
class ExtendedFileDescriptor(messages.Message):
    """File descriptor with additional fields.

    Fields:
      package: Fully qualified name of package that definitions belong to.
      message_types: Message definitions contained in file.
      enum_types: Enum definitions contained in file.
      description: Description of this file.
      additional_imports: Extra imports used in this package.
    """
    # Tag numbers 2/4/5 intentionally mirror protorpc's FileDescriptor.
    package = messages.StringField(2)
    message_types = messages.MessageField(
        ExtendedMessageDescriptor, 4, repeated=True)
    enum_types = messages.MessageField(
        ExtendedEnumDescriptor, 5, repeated=True)
    # Extension fields use tags >= 100 (see module docstring).
    description = messages.StringField(100)
    additional_imports = messages.StringField(101, repeated=True)
def _WriteFile(file_descriptor, package, version, proto_printer):
    """Write the given extended file descriptor to the printer.

    Order: preamble, enums, messages, then any custom JSON mappings
    (enum mappings first, message mappings second).
    """
    proto_printer.PrintPreamble(package, version, file_descriptor)
    _PrintEnums(proto_printer, file_descriptor.enum_types)
    _PrintMessages(proto_printer, file_descriptor.message_types)
    mappings = list(_FetchCustomMappings(
        file_descriptor.enum_types, file_descriptor.package))
    mappings += _FetchCustomMappings(
        file_descriptor.message_types, file_descriptor.package)
    for mapping in mappings:
        proto_printer.PrintCustomJsonMapping(mapping)
def WriteMessagesFile(file_descriptor, package, version, printer):
    """Render *file_descriptor* as a proto2 message file via *printer*."""
    proto2_printer = _Proto2Printer(printer)
    _WriteFile(file_descriptor, package, version, proto2_printer)
def WritePythonFile(file_descriptor, package, version, printer):
    """Render *file_descriptor* as a ProtoRPC Python module via *printer*."""
    protorpc_printer = _ProtoRpcPrinter(printer)
    _WriteFile(file_descriptor, package, version, protorpc_printer)
def PrintIndentedDescriptions(printer, ls, name, prefix=''):
    """Print '<name>:' followed by one wrapped '<x.name>: <x.description>'
    entry per element of *ls*, indented under *prefix*.

    No-op when *ls* is empty.
    """
    if not ls:
        return
    with printer.Indent(indent=prefix):
        with printer.CommentContext():
            width = printer.CalculateWidth() - len(prefix)
            printer()
            printer(name + ':')
            for item in ls:
                entry = '%s: %s' % (item.name, item.description)
                wrapped = textwrap.wrap(entry, width,
                                        initial_indent='  ',
                                        subsequent_indent='      ')
                for line in wrapped:
                    printer(line)
def _FetchCustomMappings(descriptor_ls, package):
    """Collect custom JSON mapping snippets for all descriptors, recursing
    into nested enum and message types."""
    mappings = []
    for desc in descriptor_ls:
        if isinstance(desc, ExtendedEnumDescriptor):
            for m in desc.enum_mappings:
                mappings.append(
                    _FormatCustomJsonMapping('Enum', m, desc, package))
        elif isinstance(desc, ExtendedMessageDescriptor):
            for m in desc.field_mappings:
                mappings.append(
                    _FormatCustomJsonMapping('Field', m, desc, package))
            mappings.extend(_FetchCustomMappings(desc.enum_types, package))
            mappings.extend(_FetchCustomMappings(desc.message_types, package))
    return mappings
def _FormatCustomJsonMapping(mapping_type, mapping, descriptor, package):
    """Render one encoding.AddCustomJson*Mapping(...) call as source text."""
    lines = [
        'encoding.AddCustomJson%sMapping(' % mapping_type,
        "    %s, '%s', '%s'," % (descriptor.full_name,
                                 mapping.python_name, mapping.json_name),
        '    package=%r)' % package,
    ]
    return '\n'.join(lines)
def _EmptyMessage(message_type):
    """True when the message declares no fields and no nested types."""
    return not (message_type.enum_types or
                message_type.message_types or
                message_type.fields)
class ProtoPrinter(six.with_metaclass(abc.ABCMeta, object)):
    """Interface for proto printers.

    Concrete implementations (_Proto2Printer, _ProtoRpcPrinter) also
    provide PrintCustomJsonMapping, which is not abstract here.
    """
    @abc.abstractmethod
    def PrintPreamble(self, package, version, file_descriptor):
        """Print the file docstring and import lines."""
    @abc.abstractmethod
    def PrintEnum(self, enum_type):
        """Print the given enum declaration."""
    @abc.abstractmethod
    def PrintMessage(self, message_type):
        """Print the given message declaration."""
class _Proto2Printer(ProtoPrinter):
    """Printer for proto2 definitions.

    All output goes through the wrapped line printer; the exact spacing
    and '// ' comment formatting is load-bearing for generated files.
    """
    def __init__(self, printer):
        self.__printer = printer
    def __PrintEnumCommentLines(self, enum_type):
        # Wrap the enum description into '// ' comment lines (-3 for '// ').
        description = enum_type.description or '%s enum type.' % enum_type.name
        for line in textwrap.wrap(description,
                                  self.__printer.CalculateWidth() - 3):
            self.__printer('// %s', line)
        PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values',
                                  prefix='// ')
    def __PrintEnumValueCommentLines(self, enum_value):
        # Per-value comment, only when a description exists.
        if enum_value.description:
            width = self.__printer.CalculateWidth() - 3
            for line in textwrap.wrap(enum_value.description, width):
                self.__printer('// %s', line)
    def PrintEnum(self, enum_type):
        """Print one enum declaration; values sorted by tag number."""
        self.__PrintEnumCommentLines(enum_type)
        self.__printer('enum %s {', enum_type.name)
        with self.__printer.Indent():
            enum_values = sorted(
                enum_type.values, key=operator.attrgetter('number'))
            for enum_value in enum_values:
                self.__printer()
                self.__PrintEnumValueCommentLines(enum_value)
                self.__printer('%s = %s;', enum_value.name, enum_value.number)
        self.__printer('}')
        self.__printer()
    def PrintPreamble(self, package, version, file_descriptor):
        """Print the proto2 file header: comments, syntax and package lines."""
        self.__printer('// Generated message classes for %s version %s.',
                       package, version)
        self.__printer('// NOTE: This file is autogenerated and should not be '
                       'edited by hand.')
        description_lines = textwrap.wrap(file_descriptor.description, 75)
        if description_lines:
            self.__printer('//')
            for line in description_lines:
                self.__printer('// %s', line)
        self.__printer()
        self.__printer('syntax = "proto2";')
        self.__printer('package %s;', file_descriptor.package)
    def __PrintMessageCommentLines(self, message_type):
        """Print the description of this message."""
        description = message_type.description or '%s message type.' % (
            message_type.name)
        width = self.__printer.CalculateWidth() - 3
        for line in textwrap.wrap(description, width):
            self.__printer('// %s', line)
        PrintIndentedDescriptions(self.__printer, message_type.enum_types,
                                  'Enums', prefix='// ')
        PrintIndentedDescriptions(self.__printer, message_type.message_types,
                                  'Messages', prefix='// ')
        PrintIndentedDescriptions(self.__printer, message_type.fields,
                                  'Fields', prefix='// ')
    def __PrintFieldDescription(self, description):
        for line in textwrap.wrap(description,
                                  self.__printer.CalculateWidth() - 3):
            self.__printer('// %s', line)
    def __PrintFields(self, fields):
        # Emit each field as "<label> <type> <name> = <tag>[ [default = v]];".
        for extended_field in fields:
            field = extended_field.field_descriptor
            field_type = messages.Field.lookup_field_type_by_variant(
                field.variant)
            self.__printer()
            self.__PrintFieldDescription(extended_field.description)
            label = str(field.label).lower()
            if field_type in (messages.EnumField, messages.MessageField):
                proto_type = field.type_name
            else:
                proto_type = str(field.variant).lower()
            default_statement = ''
            if field.default_value:
                # Defaults are quoted/lowered depending on field type.
                if field_type in [messages.BytesField, messages.StringField]:
                    default_value = '"%s"' % field.default_value
                elif field_type is messages.BooleanField:
                    default_value = str(field.default_value).lower()
                else:
                    default_value = str(field.default_value)
                default_statement = ' [default = %s]' % default_value
            self.__printer(
                '%s %s %s = %d%s;',
                label, proto_type, field.name, field.number, default_statement)
    def PrintMessage(self, message_type):
        """Print one message declaration, nested types first."""
        self.__printer()
        self.__PrintMessageCommentLines(message_type)
        if _EmptyMessage(message_type):
            self.__printer('message %s {}', message_type.name)
            return
        self.__printer('message %s {', message_type.name)
        with self.__printer.Indent():
            _PrintEnums(self, message_type.enum_types)
            _PrintMessages(self, message_type.message_types)
            self.__PrintFields(message_type.fields)
        self.__printer('}')
    def PrintCustomJsonMapping(self, mapping_lines):
        # proto2 output has no representation for custom JSON mappings.
        raise NotImplementedError(
            'Custom JSON encoding not supported for proto2')
class _ProtoRpcPrinter(ProtoPrinter):
    """Printer for ProtoRPC definitions.

    Emits Python source (classes deriving from _messages.Message/Enum);
    the exact spacing and docstring formatting is load-bearing.
    """
    def __init__(self, printer):
        self.__printer = printer
    def __PrintClassSeparator(self):
        # One blank line between nested classes, two at top level.
        self.__printer()
        if not self.__printer.indent:
            self.__printer()
    def __PrintEnumDocstringLines(self, enum_type):
        description = enum_type.description or '%s enum type.' % enum_type.name
        for line in textwrap.wrap('"""%s' % description,
                                  self.__printer.CalculateWidth()):
            self.__printer(line)
        PrintIndentedDescriptions(self.__printer, enum_type.values, 'Values')
        self.__printer('"""')
    def PrintEnum(self, enum_type):
        """Print one Enum subclass; values sorted by tag number."""
        self.__printer('class %s(_messages.Enum):', enum_type.name)
        with self.__printer.Indent():
            self.__PrintEnumDocstringLines(enum_type)
            enum_values = sorted(
                enum_type.values, key=operator.attrgetter('number'))
            for enum_value in enum_values:
                self.__printer('%s = %s', enum_value.name, enum_value.number)
            if not enum_type.values:
                # An empty class body still needs a statement.
                self.__printer('pass')
        self.__PrintClassSeparator()
    def __PrintAdditionalImports(self, imports):
        """Print additional imports needed for protorpc."""
        # Non-google imports go first, each group followed by a blank line.
        google_imports = [x for x in imports if 'google' in x]
        other_imports = [x for x in imports if 'google' not in x]
        if other_imports:
            for import_ in sorted(other_imports):
                self.__printer(import_)
            self.__printer()
        # Note: If we ever were going to add imports from this package, we'd
        # need to sort those out and put them at the end.
        if google_imports:
            for import_ in sorted(google_imports):
                self.__printer(import_)
            self.__printer()
    def PrintPreamble(self, package, version, file_descriptor):
        """Print the module docstring, imports and the package constant."""
        self.__printer('"""Generated message classes for %s version %s.',
                       package, version)
        self.__printer()
        for line in textwrap.wrap(file_descriptor.description, 78):
            self.__printer(line)
        self.__printer('"""')
        self.__printer('# NOTE: This file is autogenerated and should not be '
                       'edited by hand.')
        self.__printer()
        self.__PrintAdditionalImports(file_descriptor.additional_imports)
        self.__printer()
        self.__printer("package = '%s'", file_descriptor.package)
        self.__printer()
        self.__printer()
    def __PrintMessageDocstringLines(self, message_type):
        """Print the docstring for this message."""
        description = message_type.description or '%s message type.' % (
            message_type.name)
        # Single-line docstring only for empty messages with a short text.
        short_description = (
            _EmptyMessage(message_type) and
            len(description) < (self.__printer.CalculateWidth() - 6))
        with self.__printer.CommentContext():
            if short_description:
                # Note that we use explicit string interpolation here since
                # we're in comment context.
                self.__printer('"""%s"""' % description)
                return
            for line in textwrap.wrap('"""%s' % description,
                                      self.__printer.CalculateWidth()):
                self.__printer(line)
            PrintIndentedDescriptions(self.__printer, message_type.enum_types,
                                      'Enums')
            PrintIndentedDescriptions(
                self.__printer, message_type.message_types, 'Messages')
            PrintIndentedDescriptions(
                self.__printer, message_type.fields, 'Fields')
            self.__printer('"""')
            self.__printer()
    def PrintMessage(self, message_type):
        """Print one Message subclass (or a plain alias assignment)."""
        if message_type.alias_for:
            self.__printer(
                '%s = %s', message_type.name, message_type.alias_for)
            self.__PrintClassSeparator()
            return
        for decorator in message_type.decorators:
            self.__printer('@%s', decorator)
        self.__printer('class %s(_messages.Message):', message_type.name)
        with self.__printer.Indent():
            self.__PrintMessageDocstringLines(message_type)
            _PrintEnums(self, message_type.enum_types)
            _PrintMessages(self, message_type.message_types)
            _PrintFields(message_type.fields, self.__printer)
        self.__PrintClassSeparator()
    def PrintCustomJsonMapping(self, mapping):
        self.__printer(mapping)
def _PrintEnums(proto_printer, enum_types):
    """Print all enums, sorted by name, to the given proto_printer."""
    for enum_type in sorted(enum_types, key=operator.attrgetter('name')):
        proto_printer.PrintEnum(enum_type)
def _PrintMessages(proto_printer, message_list):
    """Print all messages, sorted by name, to the given proto_printer."""
    for message_type in sorted(message_list, key=operator.attrgetter('name')):
        proto_printer.PrintMessage(message_type)
# Maps a fully-qualified message definition name to the specialized field
# class that must be used for it when printing (see _PrintFields).
_MESSAGE_FIELD_MAP = {
    message_types.DateTimeMessage.definition_name(): (
        message_types.DateTimeField),
}
def _PrintFields(fields, printer):
    """Print one field declaration line per field via printer.

    Each emitted line has the form:
        <name> = <module>.<FieldType>(['<type>', ]<number>[, flags...])
    """
    for extended_field in fields:
        field = extended_field.field_descriptor
        # Pieces of the declaration; filled in below and joined at the end.
        printed_field_info = {
            'name': field.name,
            'module': '_messages',
            'type_name': '',
            'type_format': '',
            'number': field.number,
            'label_format': '',
            'variant_format': '',
            'default_format': '',
        }
        # Special-cased message types (e.g. DateTime) come from dedicated
        # modules rather than generic MessageField.
        message_field = _MESSAGE_FIELD_MAP.get(field.type_name)
        if message_field:
            printed_field_info['module'] = '_message_types'
            field_type = message_field
        elif field.type_name == 'extra_types.DateField':
            printed_field_info['module'] = 'extra_types'
            field_type = apitools_base.DateField
        else:
            field_type = messages.Field.lookup_field_type_by_variant(
                field.variant)
        # Enum and message fields name their target type as the first arg.
        if field_type in (messages.EnumField, messages.MessageField):
            printed_field_info['type_format'] = "'%s', " % field.type_name
        if field.label == protorpc_descriptor.FieldDescriptor.Label.REQUIRED:
            printed_field_info['label_format'] = ', required=True'
        elif field.label == protorpc_descriptor.FieldDescriptor.Label.REPEATED:
            printed_field_info['label_format'] = ', repeated=True'
        # Only mention the variant when it differs from the field type's
        # default variant.
        if field_type.DEFAULT_VARIANT != field.variant:
            printed_field_info['variant_format'] = (
                ', variant=_messages.Variant.%s' % field.variant)
        if field.default_value:
            if field_type in [messages.BytesField, messages.StringField]:
                default_value = repr(field.default_value)
            elif field_type is messages.EnumField:
                # Prefer the numeric form; fall back to the symbolic name.
                try:
                    default_value = str(int(field.default_value))
                except ValueError:
                    default_value = repr(field.default_value)
            else:
                default_value = field.default_value
            printed_field_info[
                'default_format'] = ', default=%s' % (default_value,)
        printed_field_info['type_name'] = field_type.__name__
        # (The loop variable below reuses the name `field` for the format
        # keys, shadowing the descriptor above; harmless since the
        # descriptor is no longer needed at this point.)
        args = ''.join('%%(%s)s' % field for field in (
            'type_format',
            'number',
            'label_format',
            'variant_format',
            'default_format'))
        format_str = '%%(name)s = %%(module)s.%%(type_name)s(%s)' % args
        printer(format_str % printed_field_info)
|
apache-2.0
|
blossomica/airmozilla
|
mysql2postgres/py-mysql2pgsql-0.1.5/mysql2pgsql/lib/mysql_reader.py
|
16
|
7435
|
from __future__ import with_statement, absolute_import
import re
from contextlib import closing
import MySQLdb
import MySQLdb.cursors
# Pulls the display length out of a type like "varchar(255)".
re_column_length = re.compile(r'\((\d+)\)')
# Pulls precision and scale out of a type like "decimal(10,2)".
re_column_precision = re.compile(r'\((\d+),(\d+)\)')
# Foreign-key constraint line in SHOW CREATE TABLE output.
re_key_1 = re.compile(r'CONSTRAINT `(\w+)` FOREIGN KEY \(`(\w+)`\) REFERENCES `(\w+)` \(`(\w+)`\)')
# Secondary (possibly UNIQUE) index line.
re_key_2 = re.compile(r'KEY `(\w+)` \((.*)\)')
# Primary-key line.
re_key_3 = re.compile(r'PRIMARY KEY \((.*)\)')
class DB:
    """
    Class that wraps MySQLdb functions that auto reconnects
    thus (hopefully) preventing the frustrating
    "server has gone away" error. Also adds helpful
    helper functions.
    """
    # Class-level default; replaced per-instance by connect().
    conn = None

    def __init__(self, options):
        """Translate generic option names into MySQLdb connect() kwargs."""
        args = {
            'user': options.get('username', 'root'),
            'db': options['database'],
            'use_unicode': True,
            'charset': 'utf8',
        }
        if options.get('password', None):
            args['passwd'] = options.get('password', None)
        # A unix socket takes precedence over host/port.
        if options.get('socket', None):
            args['unix_socket'] = options['socket']
        else:
            args['host'] = options.get('hostname', 'localhost')
            args['port'] = options.get('port', 3306)
            args['compress'] = options.get('compress', True)
        self.options = args

    def connect(self):
        self.conn = MySQLdb.connect(**self.options)

    def close(self):
        self.conn.close()

    def cursor(self, cursorclass=MySQLdb.cursors.Cursor):
        """Return a cursor, transparently (re)connecting when the
        connection was never opened (AttributeError on None) or has
        gone away (OperationalError)."""
        try:
            return self.conn.cursor(cursorclass)
        except (AttributeError, MySQLdb.OperationalError):
            self.connect()
            return self.conn.cursor(cursorclass)

    def list_tables(self):
        return self.query('SHOW TABLES;')

    def query(self, sql, args=(), one=False, large=False):
        """Run sql; return a single row when `one`, else a row generator
        (backed by a server-side cursor when `large`)."""
        return self.query_one(sql, args) if one\
            else self.query_many(sql, args, large)

    def query_one(self, sql, args):
        with closing(self.cursor()) as cur:
            cur.execute(sql, args)
            return cur.fetchone()

    def query_many(self, sql, args, large):
        # SSCursor streams rows from the server instead of buffering the
        # whole result set client-side.
        with closing(self.cursor(MySQLdb.cursors.SSCursor if large else MySQLdb.cursors.Cursor)) as cur:
            cur.execute(sql, args)
            for row in cur:
                yield row
class MysqlReader(object):
    """Reads table metadata and row data from a MySQL database."""

    class Table(object):
        """Metadata for one MySQL table: columns, indexes and foreign
        keys, loaded eagerly on construction."""

        def __init__(self, reader, name):
            self.reader = reader
            self._name = name
            self._indexes = []
            self._foreign_keys = []
            self._columns = self._load_columns()
            self._load_indexes()

        def _convert_type(self, data_type):
            """Normalize MySQL `data_type`"""
            # NOTE(review): the plain-smallint branch mapping to 'tinyint'
            # looks surprising; presumably a later stage re-maps these
            # names to target (PostgreSQL) types — TODO confirm.
            if 'varchar' in data_type:
                return 'varchar'
            elif 'char' in data_type:
                return 'char'
            elif data_type in ('bit(1)', 'tinyint(1)', 'tinyint(1) unsigned'):
                return 'boolean'
            elif re.search(r'smallint.* unsigned', data_type) or 'mediumint' in data_type:
                return 'integer'
            elif 'smallint' in data_type:
                return 'tinyint'
            elif 'tinyint' in data_type or 'year(' in data_type:
                return 'tinyint'
            elif 'bigint' in data_type and 'unsigned' in data_type:
                # unsigned bigint does not fit a signed 64-bit integer
                return 'numeric'
            elif re.search(r'int.* unsigned', data_type) or\
                    ('bigint' in data_type and 'unsigned' not in data_type):
                return 'bigint'
            elif 'int' in data_type:
                return 'integer'
            elif 'float' in data_type:
                return 'float'
            elif 'decimal' in data_type:
                return 'decimal'
            elif 'double' in data_type:
                return 'double precision'
            else:
                # Pass through anything unrecognized unchanged.
                return data_type

        def _load_columns(self):
            """Build column description dicts from EXPLAIN output."""
            fields = []
            for res in self.reader.db.query('EXPLAIN `%s`' % self.name):
                # res columns: (Field, Type, Null, Key, Default, Extra)
                length_match = re_column_length.search(res[1])
                precision_match = re_column_precision.search(res[1])
                length = length_match.group(1) if length_match else \
                    precision_match.group(1) if precision_match else None
                desc = {
                    'name': res[0],
                    'table_name': self.name,
                    'type': self._convert_type(res[1]),
                    'length': int(length) if length else None,
                    # NOTE(review): kept as a string when present — confirm
                    # downstream consumers expect that.
                    'decimals': precision_match.group(2) if precision_match else None,
                    'null': res[2] == 'YES',
                    'primary_key': res[3] == 'PRI',
                    'auto_increment': res[5] == 'auto_increment',
                    'default': res[4] if not res[4] == 'NULL' else None,
                }
                fields.append(desc)
            # Record the current maximum value of each auto-increment column.
            for field in (f for f in fields if f['auto_increment']):
                res = self.reader.db.query('SELECT MAX(`%s`) FROM `%s`;' % (field['name'], self.name), one=True)
                field['maxval'] = int(res[0]) if res[0] else 0
            return fields

        def _load_indexes(self):
            """Parse SHOW CREATE TABLE output into index/foreign-key dicts."""
            explain = self.reader.db.query('SHOW CREATE TABLE `%s`' % self.name, one=True)
            explain = explain[1]
            for line in explain.split('\n'):
                if ' KEY ' not in line:
                    continue
                index = {}
                # Foreign-key constraint line.
                match_data = re_key_1.search(line)
                if match_data:
                    index['name'] = match_data.group(1)
                    index['column'] = match_data.group(2)
                    index['ref_table'] = match_data.group(3)
                    index['ref_column'] = match_data.group(4)
                    self._foreign_keys.append(index)
                    continue
                # Secondary (possibly UNIQUE) index line.
                match_data = re_key_2.search(line)
                if match_data:
                    index['name'] = match_data.group(1)
                    index['columns'] = [re.search(r'`(\w+)`', col).group(1) for col in match_data.group(2).split(',')]
                    index['unique'] = 'UNIQUE' in line
                    self._indexes.append(index)
                    continue
                # Primary-key line; strip backquotes and prefix lengths.
                match_data = re_key_3.search(line)
                if match_data:
                    index['primary'] = True
                    index['columns'] = [re.sub(r'\(\d+\)', '', col.replace('`', '')) for col in match_data.group(1).split(',')]
                    self._indexes.append(index)
                    continue

        @property
        def name(self):
            # Table name (read-only).
            return self._name

        @property
        def columns(self):
            # Column description dicts, in EXPLAIN order.
            return self._columns

        @property
        def indexes(self):
            # Primary/secondary index dicts.
            return self._indexes

        @property
        def foreign_keys(self):
            # Foreign-key constraint dicts.
            return self._foreign_keys

        @property
        def query_for(self):
            # SELECT statement fetching every column of this table.
            return 'SELECT %(column_names)s FROM `%(table_name)s`' % {
                'table_name': self.name,
                'column_names': ', '. join(("`%s`" % c['name']) for c in self.columns)}

    def __init__(self, options):
        self.db = DB(options)

    @property
    def tables(self):
        # Lazily yields a Table per table in the database.
        return (self.Table(self, t[0]) for t in self.db.list_tables())

    def read(self, table):
        """Stream every row of `table` via a server-side cursor."""
        return self.db.query(table.query_for, large=True)

    def close(self):
        self.db.close()
|
bsd-3-clause
|
M4rtinK/anaconda
|
pyanaconda/core/timer.py
|
4
|
3465
|
# Timer class for scheduling methods after some time.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Author(s): Jiri Konecny <jkonecny@redhat.com>
#
from pyanaconda.core.glib import timeout_add, timeout_add_seconds, idle_add, source_remove
class Timer(object):
    """Schedule callables onto the GLib event loop.

    Everything scheduled through a Timer runs on the main thread.
    """

    def __init__(self):
        # GLib source id of the most recently scheduled callback (0 = none).
        self._id = 0

    def _schedule(self, scheduler, *args, **kwargs):
        # Register via the given GLib helper and remember the source id so
        # cancel() can remove it later.
        self._id = scheduler(*args, **kwargs)

    def timeout_sec(self, seconds, callback, *args, **kwargs):
        """Run callback after `seconds` seconds, repeatedly.

        The callback keeps firing until it returns False or `cancel()`
        is called.

        :param seconds: delay (in seconds) before each invocation
        :type seconds: int
        :param callback: callable to invoke
        :param args: positional arguments passed to the callback
        :param kwargs: keyword arguments passed to the callback
        """
        self._schedule(timeout_add_seconds, seconds, callback, *args, **kwargs)

    def timeout_msec(self, miliseconds, callback, *args, **kwargs):
        """Run callback after `miliseconds` milliseconds, repeatedly.

        The callback keeps firing until it returns False or `cancel()`
        is called.

        :param miliseconds: delay (in milliseconds) before each invocation
        :type miliseconds: int
        :param callback: callable to invoke
        :param args: positional arguments passed to the callback
        :param kwargs: keyword arguments passed to the callback
        """
        self._schedule(timeout_add, miliseconds, callback, *args, **kwargs)

    def timeout_now(self, callback, *args, **kwargs):
        """Run callback once the event loop becomes idle.

        Mainly used to hand work over to the main thread.

        :param callback: callable to invoke
        :param args: positional arguments passed to the callback
        :param kwargs: keyword arguments passed to the callback
        """
        self._schedule(idle_add, callback, *args, **kwargs)

    def cancel(self):
        """Cancel the scheduled callback and forget its source id.

        This stops the repetition started by timeout_sec/timeout_msec.
        """
        source_remove(self._id)
        self._id = 0
|
gpl-2.0
|
Poles/Poles
|
platforms/linux/JsonCpp/scons-local-2.3.0/SCons/Tool/ifort.py
|
11
|
3362
|
"""SCons.Tool.ifort
Tool-specific initialization for newer versions of the Intel Fortran Compiler
for Linux/Windows (and possibly Mac OS X).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/ifort.py 2013/03/03 09:48:35 garyo"
import SCons.Defaults
from SCons.Scanner.Fortran import FortranScan
from FortranCommon import add_all_to_env
def generate(env):
    """Add Builders and construction variables for ifort to an Environment."""
    # ifort supports Fortran 90 and Fortran 95
    # Additionally, ifort recognizes more file extensions.
    fscan = FortranScan("FORTRANPATH")
    # NOTE(review): relies on SCons.Tool and SCons.Util being reachable
    # through the SCons package even though only SCons.Defaults is
    # imported at the top of this file — TODO confirm.
    SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
    SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)
    # Register ifort's extra preprocessed-source suffixes.
    if 'FORTRANFILESUFFIXES' not in env:
        env['FORTRANFILESUFFIXES'] = ['.i']
    else:
        env['FORTRANFILESUFFIXES'].append('.i')
    if 'F90FILESUFFIXES' not in env:
        env['F90FILESUFFIXES'] = ['.i90']
    else:
        env['F90FILESUFFIXES'].append('.i90')
    add_all_to_env(env)
    # Use ifort for every Fortran dialect, shared builds included.
    fc = 'ifort'
    for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
        env['%s' % dialect] = fc
        env['SH%s' % dialect] = '$%s' % dialect
        if env['PLATFORM'] == 'posix':
            env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)
    if env['PLATFORM'] == 'win32':
        # On Windows, the ifort compiler specifies the object on the
        # command line with -object:, not -o. Massage the necessary
        # command-line construction variables.
        for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
            for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
                        'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
                env[var] = env[var].replace('-o $TARGET', '-object:$TARGET')
        env['FORTRANMODDIRPREFIX'] = "/module:"
    else:
        env['FORTRANMODDIRPREFIX'] = "-module "
def exists(env):
    """Return a true value when the ifort compiler can be detected."""
    compiler = 'ifort'
    return env.Detect(compiler)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gpl-3.0
|
XiaominZhang/Impala
|
tests/comparison/cli_options.py
|
12
|
7034
|
# Copyright (c) 2014 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Helpers for parsing command line options'''
import logging
import os
import sys
from optparse import NO_DEFAULT, OptionGroup
from tempfile import gettempdir
from tests.comparison.types import TYPES
def add_logging_options(section, default_debug_log_file=None):
    """Add --log-level and --debug-log-file options to `section`.

    When no default debug log path is given, one is derived from the
    main script's name inside the system temp directory.
    """
    if not default_debug_log_file:
        default_debug_log_file = os.path.join(
            gettempdir(), os.path.basename(sys.modules["__main__"].__file__) + ".log")
    section.add_option('--log-level', default='INFO',
        help='The log level to use.', choices=('DEBUG', 'INFO', 'WARN', 'ERROR'))
    section.add_option('--debug-log-file', default=default_debug_log_file,
        help='Path to debug log file.')
def configure_logging(log_level, debug_log_file=None, log_thread_id=False,
        log_process_id=False):
    """Configure root logging with a console handler and optional debug file.

    The root logger is set to DEBUG so the file handler can capture
    everything; the console handler filters to `log_level`.

    :param log_level: level name for console output ('DEBUG', 'INFO', ...)
    :param debug_log_file: if set, also log everything (DEBUG) to this file
    :param log_thread_id: include the thread id in each record
    :param log_process_id: include the process id in each record
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.name = "console"
    console_logger.setLevel(getattr(logging, log_level))
    # Renamed from `format` so the builtin of that name is not shadowed.
    log_format = "%(asctime)s"
    if log_process_id:
        log_format += " %(process)d"
    if log_thread_id:
        log_format += " %(thread)d"
    log_format += " %(levelname)s:%(module)s[%(lineno)s]:%(message)s"
    console_logger.setFormatter(logging.Formatter(log_format, "%H:%M:%S"))
    root_logger.addHandler(console_logger)
    if debug_log_file:
        # mode="w" truncates any previous debug log.
        file_logger = logging.FileHandler(debug_log_file, mode="w")
        file_logger.name = "file"
        file_logger.setFormatter(logging.Formatter(log_format, "%H:%M:%S"))
        file_logger.setLevel(logging.DEBUG)
        root_logger.addHandler(file_logger)
def add_cm_options(section):
    """Add Cloudera Manager (CM) connection options to `section`."""
    option_specs = [
        ('--cm-host', dict(metavar='host name',
            help='The host name of the CM server.')),
        ('--cm-port', dict(default=7180, type=int, metavar='port number',
            help='The port of the CM server.')),
        ('--cm-user', dict(default="admin", metavar='user name',
            help='The name of the CM user.')),
        ('--cm-password', dict(default="admin", metavar='password',
            help='The password for the CM user.')),
        ('--cm-cluster-name', dict(metavar='name',
            help='If CM manages multiple clusters, use this to specify which cluster to use.')),
    ]
    for opt_string, opt_kwargs in option_specs:
        section.add_option(opt_string, **opt_kwargs)
def add_db_name_option(section):
    """Add --db-name (name of the database to use)."""
    option_kwargs = {
        'default': 'randomness',
        'help': 'The name of the database to use. Ex: functional.',
    }
    section.add_option('--db-name', **option_kwargs)
def add_storage_format_options(section):
    """Add --storage-file-formats, defaulting to every supported format."""
    default_formats = 'avro,parquet,rcfile,sequencefile,textfile'
    section.add_option('--storage-file-formats', default=default_formats,
        help='A comma separated list of storage formats to use.')
def add_data_types_options(section):
    """Add --data-types, defaulting to the name of every type in
    tests.comparison.types.TYPES."""
    section.add_option('--data-types', default=','.join(type_.__name__ for type_ in TYPES),
        help='A comma separated list of data types to use.')
def add_timeout_option(section):
    """Add --timeout (query timeout in seconds; default three minutes)."""
    three_minutes = 3 * 60
    section.add_option('--timeout', default=three_minutes, type=int,
        help='Query timeout in seconds')
def _add_sql_db_option_group(parser, name, prefix, default_port, default_user):
    """Add an option group for one generic SQL database.

    Every such group has the same shape: --use-<db>, --<db>-host,
    --<db>-port, --<db>-user and --<db>-password.

    :param parser: the OptionParser to extend
    :param name: display name used in titles/help (e.g. 'MySQL')
    :param prefix: option-name prefix (e.g. 'mysql')
    :param default_port: default TCP port for the database
    :param default_user: default user name for the database
    """
    group = OptionGroup(parser, '%s Options' % name)
    group.add_option('--use-%s' % prefix, action='store_true', default=False,
        help='Use %s' % name)
    group.add_option('--%s-host' % prefix, default='localhost',
        help='The name of the host running the %s database.' % name)
    group.add_option('--%s-port' % prefix, default=default_port, type=int,
        help='The port of the host running the %s database.' % name)
    group.add_option('--%s-user' % prefix, default=default_user,
        help='The user name to use when connecting to the %s database.' % name)
    group.add_option('--%s-password' % prefix,
        help='The password to use when connecting to the %s database.' % name)
    parser.add_option_group(group)


def add_connection_option_groups(parser):
    """Add Impala, Hive, MySQL, Oracle and Postgresql connection option
    groups to `parser`."""
    group = OptionGroup(parser, "Impala Options")
    group.add_option('--impalad-host', default='localhost',
        help="The name of the host running the Impala daemon")
    group.add_option("--impalad-hs2-port", default=21050, type=int,
        help="The hs2 port of the host running the Impala daemon")
    parser.add_option_group(group)
    group = OptionGroup(parser, "Hive Options")
    group.add_option('--use-hive', action='store_true', default=False,
        help='Use Hive (Impala will be skipped)')
    group.add_option('--hive-host', default='localhost',
        help="The name of the host running the HS2")
    group.add_option("--hive-port", default=10000, type=int,
        help="The port of HiveServer2")
    group.add_option('--hive-user', default='hive',
        help="The user name to use when connecting to HiveServer2")
    group.add_option('--hive-password', default='hive',
        help="The password to use when connecting to HiveServer2")
    group.add_option('--hdfs-host',
        help='The host for HDFS backing Hive tables, necessary for external HiveServer2')
    group.add_option('--hdfs-port',
        help='The port for HDFS backing Hive tables, necessary for external HiveServer2')
    parser.add_option_group(group)
    # The three generic SQL databases share the same option shape; only
    # the name, default port and default user differ.
    _add_sql_db_option_group(parser, 'MySQL', 'mysql', 3306, 'root')
    _add_sql_db_option_group(parser, 'Oracle', 'oracle', 1521, 'system')
    _add_sql_db_option_group(parser, 'Postgresql', 'postgresql', 5432, 'postgres')
def add_default_values_to_help(parser):
    """Append ' [default: %default]' to the help text of every option
    that has both a default value and existing help text."""
    containers = parser.option_groups + [parser]
    for container in containers:
        for option in container.option_list:
            has_default = option.default != NO_DEFAULT
            if has_default and option.help:
                option.help += ' [default: %default]'
|
apache-2.0
|
txemi/ansible
|
lib/ansible/utils/module_docs_fragments/dellos9.py
|
303
|
2451
|
#
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared Ansible documentation fragment for dellos9 modules: the
    standard connection `provider` suboptions."""

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  provider:
    description:
      - A dict object containing connection details.
    default: null
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport. The value of host is used as
            the destination address for the transport.
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote
            device.
        default: 22
      username:
        description:
          - User to authenticate the SSH session to the remote device. If the
            value is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_USERNAME) will be used instead.
      password:
        description:
          - Password to authenticate the SSH session to the remote device. If the
            value is not specified in the task, the value of environment variable
            C(ANSIBLE_NET_PASSWORD) will be used instead.
        default: null
      ssh_keyfile:
        description:
          - Path to an ssh key used to authenticate the SSH session to the remote
            device. If the value is not specified in the task, the value of
            environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
      timeout:
        description:
          - Specifies idle timeout (in seconds) for the connection. Useful if the
            console freezes before continuing. For example when saving
            configurations.
        default: 10
"""
|
gpl-3.0
|
fotinakis/sentry
|
src/sentry/models/eventerror.py
|
1
|
2581
|
from __future__ import absolute_import
import six
class EventError(object):
    """Registry of event-processing error codes and the user-facing
    message templates used to render them."""

    INVALID_DATA = 'invalid_data'
    INVALID_ATTRIBUTE = 'invalid_attribute'
    VALUE_TOO_LONG = 'value_too_long'
    UNKNOWN_ERROR = 'unknown_error'
    SECURITY_VIOLATION = 'security_violation'
    RESTRICTED_IP = 'restricted_ip'
    JS_GENERIC_FETCH_ERROR = 'js_generic_fetch_error'
    JS_INVALID_HTTP_CODE = 'js_invalid_http_code'
    JS_INVALID_CONTENT = 'js_invalid_content'
    JS_NO_COLUMN = 'js_no_column'
    JS_MISSING_SOURCE = 'js_no_source'
    JS_INVALID_SOURCEMAP = 'js_invalid_source'
    JS_TOO_MANY_REMOTE_SOURCES = 'js_too_many_sources'
    JS_INVALID_SOURCE_ENCODING = 'js_invalid_source_encoding'
    JS_INVALID_SOURCEMAP_LOCATION = 'js_invalid_sourcemap_location'
    NATIVE_NO_CRASHED_THREAD = 'native_no_crashed_thread'
    NATIVE_INTERNAL_FAILURE = 'native_internal_failure'
    NATIVE_NO_SYMSYND = 'native_no_symsynd'

    # str.format templates keyed by error code; placeholders are filled
    # from the remaining keys of the error data dict.
    _messages = {
        INVALID_DATA: u'Discarded invalid value for parameter \'{name}\'',
        INVALID_ATTRIBUTE: u'Discarded invalid parameter \'{name}\'',
        VALUE_TOO_LONG: u'Discarded value for \'{name}\' due to exceeding maximum length',
        UNKNOWN_ERROR: u'Unknown error',
        SECURITY_VIOLATION: u'Cannot fetch resource due to security violation on {url}',
        RESTRICTED_IP: u'Cannot fetch resource due to restricted IP address on {url}',
        JS_GENERIC_FETCH_ERROR: u'Unable to fetch resource: {url}',
        JS_INVALID_HTTP_CODE: u'HTTP returned {value} response on {url}',
        JS_INVALID_CONTENT: u'Source file was not JavaScript: {url}',
        JS_NO_COLUMN: u'Cannot expand sourcemap due to no column information for {url}',
        JS_MISSING_SOURCE: u'Source code was not found for {url}',
        JS_INVALID_SOURCEMAP: u'Sourcemap was invalid or not parseable: {url}',
        JS_TOO_MANY_REMOTE_SOURCES: u'The maximum number of remote source requests was made',
        JS_INVALID_SOURCE_ENCODING: u'Source file was not \'{value}\' encoding: {url}',
        JS_INVALID_SOURCEMAP_LOCATION: u'Invalid location in sourcemap: ({column}, {row})',
        NATIVE_NO_CRASHED_THREAD: u'No crashed thread found in crash report',
        NATIVE_INTERNAL_FAILURE: u'Internal failure when attempting to symbolicate: {error}',
        NATIVE_NO_SYMSYND: u'The symbolizer is not configured for this system.',
    }

    @classmethod
    def get_message(cls, data):
        """Render the template for data['type'] using the remaining keys
        of `data` as format arguments."""
        return cls._messages[data['type']].format(**data)

    def to_dict(self):
        # NOTE(review): six.iteritems(self) requires self to expose an
        # items()/iteritems() mapping interface, which a plain EventError
        # instance does not — confirm how instances are constructed
        # before relying on this method.
        return {k: v for k, v in six.iteritems(self) if k != 'type'}
|
bsd-3-clause
|
hainm/scikit-learn
|
examples/preprocessing/plot_function_transformer.py
|
161
|
1949
|
"""
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """Build a two-class dataset of two noisy lines with slope ~1.

    The class-0 line is offset by ~100 in y; the class-1 line is not.
    Returns (X, y) where X has shape (2000, 2) and y holds 1000 zeros
    followed by 1000 ones.
    """
    offset_line = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    plain_line = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    features = np.vstack((offset_line, plain_line))
    labels = np.hstack((np.zeros(1000), np.ones(1000)))
    return features, labels
def all_but_first_column(X):
    """Return a view of X with column 0 dropped."""
    remaining = X[:, 1:]
    return remaining
def drop_first_component(X, y):
    """
    Create a pipeline with PCA and the column selector and use it to
    transform the dataset.

    Returns the transformed held-out split and its labels.
    """
    pipeline = make_pipeline(
        PCA(), FunctionTransformer(all_but_first_column),
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    # Only the held-out split is transformed and returned.
    return pipeline.transform(X_test), y_test
if __name__ == '__main__':
    # Show the raw two-line dataset...
    X, y = generate_dataset()
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
    plt.show()
    # ...then the data with the first principal component dropped:
    # what remains is plotted along a single axis.
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        s=50,
    )
    plt.show()
|
bsd-3-clause
|
blackzw/openwrt_sdk_dev1
|
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/keyword.py
|
179
|
1994
|
#! /usr/bin/env python
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]

# Reserved words, kept sorted between the marker comments.  main() below
# matches the marker lines literally, so their exact text must not change.
kwlist = [
#--start keywords--
        'and',
        'as',
        'assert',
        'break',
        'class',
        'continue',
        'def',
        'del',
        'elif',
        'else',
        'except',
        'exec',
        'finally',
        'for',
        'from',
        'global',
        'if',
        'import',
        'in',
        'is',
        'lambda',
        'not',
        'or',
        'pass',
        'print',
        'raise',
        'return',
        'try',
        'while',
        'with',
        'yield',
#--end keywords--
        ]

# O(1) membership test bound directly to the frozenset's __contains__.
iskeyword = frozenset(kwlist).__contains__
def main():
    """Regenerate the kwlist section above.

    Scans the keyword strings out of Python/graminit.c (or the input file
    named as the first argument) and rewrites the block between the
    '#--start keywords--' / '#--end keywords--' markers in Lib/keyword.py
    (or the output file named as the second argument).
    """
    import sys, re
    args = sys.argv[1:]
    # Conditional expression instead of the old `and/or` trick, which
    # silently fell back to the default for an empty-string argument.
    iptfile = args[0] if args else "Python/graminit.c"
    if len(args) > 1:
        optfile = args[1]
    else:
        optfile = "Lib/keyword.py"

    # Scan the source file for keywords: each appears as {1, "name"}.
    strprog = re.compile('"([^"]+)"')
    lines = []
    with open(iptfile) as fp:
        for line in fp:
            if '{1, "' in line:
                match = strprog.search(line)
                if match:
                    lines.append(" '" + match.group(1) + "',\n")
    lines.sort()

    # Load the output skeleton from the target.
    with open(optfile) as fp:
        skeleton = fp.readlines()

    # Splice the keyword lines between the marker comments.
    try:
        start = skeleton.index("#--start keywords--\n") + 1
        end = skeleton.index("#--end keywords--\n")
        skeleton[start:end] = lines
    except ValueError:
        sys.stderr.write("target does not contain format markers\n")
        sys.exit(1)

    # Write the output file.
    with open(optfile, 'w') as fp:
        fp.write(''.join(skeleton))
# Regenerate the keyword list when run as a script.
if __name__ == "__main__":
    main()
|
gpl-2.0
|
dmnyu/bitcurator
|
bctools/fiwalk.py
|
3
|
9124
|
#!/usr/bin/env python
### DO NOT MODIFY THIS FILE ###
### DOWNLOAD NEW FILE FROM https://raw.github.com/simsong/dfxml/master/python/fiwalk.py
#
# fiwalk version 0.6.3
#
# %%% BEGIN NO FILL
"""fiwalk module
This is the part of dfxml that is dependent on fiwalk.py
"""
import dfxml
from sys import stderr
from subprocess import Popen,PIPE
# Flag for fiwalk_xml_stream(): adds -O so only allocated files are reported.
ALLOC_ONLY = 1

# Cached result of fiwalk_installed_version() (None until first call).
fiwalk_cached_installed_version = None
def fiwalk_installed_version(fiwalk='fiwalk'):
    """Return the version of the installed fiwalk binary, or None.

    Runs `fiwalk -V` and parses either the "FIWalk Version:" or the
    "SleuthKit Version:" line.  The result is cached in the module-level
    fiwalk_cached_installed_version so the binary is only run once.

    :param fiwalk: name or path of the fiwalk executable
    """
    global fiwalk_cached_installed_version
    if fiwalk_cached_installed_version:
        return fiwalk_cached_installed_version
    from subprocess import Popen, PIPE
    import re
    output = Popen([fiwalk, '-V'], stdout=PIPE).stdout.read().decode('utf-8')
    for line in output.split("\n"):
        # Raw strings so \s stays a regex escape rather than a
        # (deprecated) string escape.
        g = re.search(r"^FIWalk Version:\s+(.*)$", line)
        if g:
            fiwalk_cached_installed_version = g.group(1)
            return fiwalk_cached_installed_version
        g = re.search(r"^SleuthKit Version:\s+(.*)$", line)
        if g:
            fiwalk_cached_installed_version = g.group(1)
            return fiwalk_cached_installed_version
    return None
class XMLDone(Exception):
    """Raised by the expat handlers to abort parsing early, carrying the
    extracted value (or None) in .value."""
    def __init__(self, value):
        self.value = value
class version:
    """Expat handler set that extracts a single value (version, command
    line, or start time) from a DFXML/fiwalk XML file.

    Parsing is aborted with an XMLDone exception as soon as the wanted
    value has been seen, so the whole file is never read."""
    def __init__(self):
        self.cdata = ""          # text accumulated for the current element
        self.in_element = []     # stack of currently-open element names
        self.version = None
    def start_element(self,name,attrs):
        if(name=='volume'): # too far?
            raise XMLDone(None)
        self.in_element += [name]
        self.cdata = ""
    def end_element(self,name):
        # Three DFXML layouts are recognized for the version element.
        if ("fiwalk" in self.in_element) and ("creator" in self.in_element) and ("version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("fiwalk" in self.in_element) and ("fiwalk_version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("version" in self.in_element) and ("dfxml" in self.in_element) and ("creator" in self.in_element):
            raise XMLDone(self.cdata)
        self.in_element.pop()
        self.cdata = ""
    def end_element_version(self,name):
        # Identical matching to end_element; kept separate so it can be
        # installed independently as the EndElementHandler.
        if ("fiwalk" in self.in_element) and ("creator" in self.in_element) and ("version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("fiwalk" in self.in_element) and ("fiwalk_version" in self.in_element):
            raise XMLDone(self.cdata)
        if ("version" in self.in_element) and ("dfxml" in self.in_element) and ("creator" in self.in_element):
            raise XMLDone(self.cdata)
        self.in_element.pop()
        self.cdata = ""
    def end_element_command_line(self,name):
        # Matches dfxml/creator/execution_environment/command_line.
        if ("command_line" in self.in_element) and ("execution_environment" in self.in_element) and ("creator" in self.in_element) and ("dfxml" in self.in_element):
            raise XMLDone(self.cdata)
        self.in_element.pop()
        self.cdata = ""
    def end_element_start_time(self,name):
        # Matches dfxml/creator/execution_environment/start_time.
        if ("start_time" in self.in_element) and ("execution_environment" in self.in_element) and ("creator" in self.in_element) and ("dfxml" in self.in_element):
            raise XMLDone(self.cdata)
        self.in_element.pop()
        self.cdata = ""
    def char_data(self,data):
        self.cdata += data
    def get_version(self,fn):
        """Parse fn and return its version string (None on XML error)."""
        import xml.parsers.expat
        p = xml.parsers.expat.ParserCreate()
        p.StartElementHandler = self.start_element
        p.EndElementHandler = self.end_element
        p.CharacterDataHandler = self.char_data
        try:
            p.ParseFile(open(fn,'rb'))
        except XMLDone as e:
            return e.value
        except xml.parsers.expat.ExpatError:
            return None # XML error
    # Get the information associated with line "command_line"
    def get_command_line(self, fn):
        """Parse fn and return the recorded command line (None on XML error)."""
        import xml.parsers.expat
        p = xml.parsers.expat.ParserCreate()
        p.StartElementHandler = self.start_element
        p.EndElementHandler = self.end_element_command_line
        p.CharacterDataHandler = self.char_data
        try:
            p.ParseFile(open(fn,'rb'))
        except XMLDone as e:
            return e.value
        except xml.parsers.expat.ExpatError:
            return None # XML error
    # Get the information associated with line "start_time"
    def get_start_time(self, fn):
        """Parse fn and return the recorded start time (None on XML error)."""
        import xml.parsers.expat
        p = xml.parsers.expat.ParserCreate()
        p.StartElementHandler = self.start_element
        p.EndElementHandler = self.end_element_start_time
        p.CharacterDataHandler = self.char_data
        try:
            p.ParseFile(open(fn,'rb'))
        except XMLDone as e:
            return e.value
        except xml.parsers.expat.ExpatError:
            return None # XML error
def fiwalk_xml_version(filename=None):
    """Return the fiwalk version recorded in the given XML file.

    Uses the "quick and dirty" expat-based scan implemented by version().
    """
    return version().get_version(filename)
def fiwalk_xml_command_line(filename=None):
    """Returns the command line that was used to create an XML file.
    Uses the "quick and dirty" expat-based approach to extract it."""
    p = version()
    return p.get_command_line(filename)
def fiwalk_xml_start_time(filename=None):
    """Returns the start time recorded in an XML file.
    Uses the "quick and dirty" expat-based approach to extract it."""
    p = version()
    return p.get_start_time(filename)
################################################################
def E01_glob(fn):
    """If the filename ends .E01, then glob it.

    Returns the list of segment files that exist on disk, starting with
    *fn* itself and stopping at the first missing segment.  Currently
    only handles E01 through EZZ (no F01+, etc.).
    """
    # NOTE: the original placed the docstring after this import, which made
    # it a no-op string literal rather than a real docstring.
    import os.path
    ret = [fn]
    if not (fn.endswith(".E01") and os.path.exists(fn)):
        return ret
    # Numeric segments: .E02 .. .E99
    fmt = fn.replace(".E01", ".E%02d")
    for i in range(2, 100):
        f2 = fmt % i
        if not os.path.exists(f2):
            return ret
        ret.append(f2)
    # Got through E99, now do EAA through EZZ
    fmt = fn.replace(".E01", ".E%c%c")
    for i in range(0, 26):
        for j in range(0, 26):
            f2 = fmt % (i + ord('A'), j + ord('A'))
            if not os.path.exists(f2):
                return ret
            ret.append(f2)
    return ret  # don't do F01 through F99, etc.
def fiwalk_xml_stream(imagefile=None,flags=0,fiwalk="fiwalk",fiwalk_args=""):
    """ Returns an fiwalk XML stream given a disk image by running fiwalk."""
    # NOTE(review): appending "-O" directly can fuse with caller-supplied
    # fiwalk_args (e.g. "-g" becomes "-g-O"); confirm callers only use the
    # ALLOC_ONLY flag with the default empty fiwalk_args.
    if flags & ALLOC_ONLY: fiwalk_args += "-O"
    from subprocess import call,Popen,PIPE
    # Make sure we have a valid fiwalk
    try:
        res = Popen([fiwalk,'-V'],stdout=PIPE).communicate()[0]
    except OSError:
        raise RuntimeError("Cannot execute fiwalk executable: "+fiwalk)
    cmd = [fiwalk,'-x']
    # NOTE(review): fiwalk_args is appended as a single argv token, so a
    # multi-flag string like "-g -O" reaches fiwalk as one argument.
    if fiwalk_args: cmd += [fiwalk_args]
    # E01_glob() expands the first segment of a split EnCase image.
    p = Popen(cmd + E01_glob(imagefile.name),stdout=PIPE)
    return p.stdout
def fiwalk_using_sax(imagefile=None, xmlfile=None, fiwalk="fiwalk", flags=0,
                     callback=None, fiwalk_args=""):
    """Stream-parse an image's DFXML, invoking *callback* per fileobject.

    If *xmlfile* is provided it is parsed directly; otherwise fiwalk is
    run on *imagefile* to produce the XML stream.
    """
    import dfxml
    if xmlfile is None:
        xmlfile = fiwalk_xml_stream(imagefile=imagefile, flags=flags,
                                    fiwalk=fiwalk, fiwalk_args=fiwalk_args)
    reader = dfxml.fileobject_reader(flags=flags)
    reader.imagefile = imagefile
    reader.process_xml_stream(xmlfile, callback)
def fiwalk_vobj_using_sax(imagefile=None, xmlfile=None, fiwalk="fiwalk",
                          flags=0, callback=None):
    """Stream-parse an image's DFXML, invoking *callback* per volume object.

    If *xmlfile* is provided it is parsed directly; otherwise fiwalk is
    run on *imagefile*.  Returns the volumeobject reader used.
    """
    import dfxml
    if xmlfile is None:
        xmlfile = fiwalk_xml_stream(imagefile=imagefile, flags=flags,
                                    fiwalk=fiwalk)
    reader = dfxml.volumeobject_reader()
    reader.imagefile = imagefile
    reader.process_xml_stream(xmlfile, callback)
    return reader
def fileobjects_using_sax(imagefile=None, xmlfile=None, fiwalk="fiwalk", flags=0):
    """Collect and return every fileobject from an image/XML as a list."""
    collected = []
    fiwalk_using_sax(imagefile=imagefile, xmlfile=xmlfile, fiwalk=fiwalk,
                     flags=flags, callback=collected.append)
    return collected
def fileobjects_using_dom(imagefile=None,xmlfile=None,fiwalk="fiwalk",flags=0,callback=None):
    """Processes an image using the DOM implementation in dfxml and returns
    the result of dfxml.fileobjects_dom().  If xmlfile is provided, use that
    as the xmlfile, otherwise runs fiwalk.
    NOTE: *callback* is accepted for signature symmetry with the SAX variant
    but is not used by this DOM path."""
    import dfxml
    if xmlfile==None:
        xmlfile = fiwalk_xml_stream(imagefile=imagefile,flags=flags,fiwalk=fiwalk)
    return dfxml.fileobjects_dom(xmlfile=xmlfile,imagefile=imagefile,flags=flags)
# Running tally of callback invocations, used by the __main__ self-test.
ctr = 0

def cb_count(fn):
    """Callback that counts how many times it has been invoked."""
    global ctr
    ctr = ctr + 1
if __name__=="__main__":
    # Self-test: for each XML file named on the command line, print the
    # fiwalk version it records and count its fileobjects.
    import sys
    for fn in sys.argv[1:]:
        print("{} contains fiwalk version {}".format(fn,fiwalk_xml_version(fn)))
        # Count the number of files
        fiwalk_using_sax(xmlfile=open(fn,'rb'),callback=cb_count)
        print("Files: {}".format(ctr))
|
gpl-3.0
|
dodo5522/python-xbee
|
xbee/tests/test_frame.py
|
51
|
3082
|
#! /usr/bin/python
"""
test_frame.py
Paul Malmsten, 2010
pmalmsten@gmail.com
Tests frame module for proper behavior
"""
import unittest
from xbee.frame import APIFrame
from xbee.python2to3 import byteToInt, intToByte
class TestAPIFrameGeneration(unittest.TestCase):
    """
    XBee class must be able to create a valid API frame given binary
    data, in byte string form.
    """

    def test_single_byte(self):
        """
        create a frame containing a single byte
        """
        payload = b'\x00'
        # 0x7E start delimiter, 16-bit length (0x0001), payload, checksum.
        self.assertEqual(APIFrame(payload).output(),
                         b'\x7E\x00\x01\x00\xFF')
class TestAPIFrameParsing(unittest.TestCase):
    """
    XBee class must be able to read and validate the data contained
    by a valid API frame.
    """

    def test_remaining_bytes(self):
        """
        remaining_bytes() should provide accurate indication
        of remaining bytes required before parsing a packet
        """
        api_frame = APIFrame()
        # Frame layout: start byte, two length bytes (0x0004),
        # four data bytes, checksum.
        frame = b'\x7E\x00\x04\x00\x00\x00\x00\xFF'
        # Until the length field is known, 3 bytes are expected
        # (start byte + 2 length bytes).
        self.assertEqual(api_frame.remaining_bytes(), 3)
        api_frame.fill(frame[0])
        self.assertEqual(api_frame.remaining_bytes(), 2)
        api_frame.fill(frame[1])
        self.assertEqual(api_frame.remaining_bytes(), 1)
        api_frame.fill(frame[2])
        # Length (4) now known: 4 data bytes + 1 checksum byte remain.
        self.assertEqual(api_frame.remaining_bytes(), 5)
        api_frame.fill(frame[3])
        self.assertEqual(api_frame.remaining_bytes(), 4)

    def test_single_byte(self):
        """
        read a frame containing a single byte
        """
        api_frame = APIFrame()
        frame = b'\x7E\x00\x01\x00\xFF'
        expected_data = b'\x00'
        for byte in frame:
            # Iterating bytes yields ints on Py3 and bytes on Py2;
            # normalize to a single-byte string before filling.
            api_frame.fill(intToByte(byteToInt(byte)))
        api_frame.parse()
        self.assertEqual(api_frame.data, expected_data)

    def test_invalid_checksum(self):
        """
        when an invalid frame is read, an exception must be raised
        """
        api_frame = APIFrame()
        # Correct checksum for this payload would be 0xFF; 0xF6 is invalid.
        frame = b'\x7E\x00\x01\x00\xF6'
        for byte in frame:
            api_frame.fill(intToByte(byteToInt(byte)))
        self.assertRaises(ValueError, api_frame.parse)
class TestEscaping(unittest.TestCase):
    """
    APIFrame class must properly escape and unescape data
    """

    def test_escape_method(self):
        """
        APIFrame.escape() must work as expected
        """
        # Escaping the start byte (0x7E) must yield the escape byte
        # followed by the start byte XORed with 0x20 (-> 0x5E).
        test_data = APIFrame.START_BYTE
        new_data = APIFrame.escape(test_data)
        self.assertEqual(new_data, APIFrame.ESCAPE_BYTE + b'\x5e')

    def test_unescape_input(self):
        """
        APIFrame must properly unescape escaped input
        """
        # 0x7D is the escape marker; 0x23 XOR 0x20 == 0x03.
        test_data = b'\x7D\x23'
        expected_data = b'\x03'
        frame = APIFrame(escaped=True)
        # Feed one byte at a time, as a serial reader would.
        for byte in [test_data[x:x+1] for x in range(0, len(test_data))]:
            frame.fill(byte)
        self.assertEqual(frame.raw_data, expected_data)
|
mit
|
woodpecker1/phantomjs
|
src/qt/qtwebkit/Tools/QueueStatusServer/handlers/gc.py
|
146
|
2038
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class GC(webapp.RequestHandler):
    """Garbage-collect stale QueueStatus records.

    Keeps only the newest inactive status per queue; any status still
    attached to an active patch or bug is never deleted.
    """
    def get(self):
        queues_seen = set()
        # Newest-first order, so the first inactive record per queue
        # survives and older duplicates are removed.
        for record in QueueStatus.all().order("-date"):
            if record.active_patch_id or record.active_bug_id:
                continue
            if record.queue_name in queues_seen:
                record.delete()
            queues_seen.add(record.queue_name)
        self.response.out.write("Done!")
|
bsd-3-clause
|
arnoldlu/lisa
|
libs/utils/android/benchmark.py
|
3
|
13801
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import logging
import os
import select
from subprocess import Popen, PIPE
from time import sleep
from conf import LisaLogging
from android import System, Workload
from env import TestEnv
from devlib.utils.misc import memoized
from devlib.utils.android import fastboot_command
class LisaBenchmark(object):
    """
    A base class for LISA custom benchmarks execution.

    This class is intended to be subclassed in order to create a custom
    benchmark execution for LISA.
    It sets up the TestEnv and provides convenience methods for
    test environment setup, execution and post-processing.

    Subclasses should provide a bm_conf to setup the TestEnv and
    a set of optional callback methods to configure a test environment
    and process collected data.

    Example users of this class can be found under LISA's tests/benchmarks
    directory.
    """

    bm_conf = {

        # Target platform and board
        "platform"    : 'android',

        # Define devlib modules to load
        "modules"     : [
            'cpufreq',
            'cpuidle',
        ],

        # FTrace events to collect for all the tests configuration which have
        # the "ftrace" flag enabled
        "ftrace"  : {
            "events" : [
                "sched_switch",
                "sched_overutilized",
                "sched_contrib_scale_f",
                "sched_load_avg_cpu",
                "sched_load_avg_task",
                "sched_tune_tasks_update",
                "sched_boost_cpu",
                "sched_boost_task",
                "sched_energy_diff",
                "cpu_frequency",
                "cpu_idle",
                "cpu_capacity",
            ],
            "buffsize" : 10 * 1024,
        },

        # Default EnergyMeter Configuration
        "emeter" : {
            "instrument" : "acme",
            "channel_map" : {
                "Device0" : 0,
            }
        },

        # Tools required by the experiments
        "tools"   : [ 'trace-cmd' ],
    }
    """Override this with a dictionary or JSON path to configure the TestEnv"""

    bm_name = None
    """Override this with the name of the LISA's benchmark to run"""

    bm_params = None
    """Override this with the set of parameters for the LISA's benchmark to run"""

    bm_collect = None
    """Override this with the set of data to collect during test exeution"""

    bm_reboot = False
    """Override this with True if a boot image was passed as command line parameter"""

    bm_iterations = 1
    """Override this with the desired number of iterations of the test"""

    bm_iterations_pause = 30
    """
    Override this with the desired amount of time (in seconds) to pause
    for before each iteration
    """

    bm_iterations_reboot = False
    """
    Override this with the desired behaviour: reboot or not reboot before
    each iteration
    """

    def benchmarkInit(self):
        """
        Code executed before running the benchmark
        """
        pass

    def benchmarkFinalize(self):
        """
        Code executed after running the benchmark
        """
        pass

    ################################################################################
    # Private Interface

    @memoized
    def _parseCommandLine(self):
        # Build and parse the benchmark's command-line options; memoized so
        # repeated calls return the same parsed namespace.
        parser = argparse.ArgumentParser(
            description='LISA Benchmark Configuration')

        # Bootup settings
        parser.add_argument('--boot-image', type=str,
                            default=None,
                            help='Path of the Android boot.img to be used')
        parser.add_argument('--boot-timeout', type=int,
                            default=60,
                            help='Timeout in [s] to wait after a reboot (default 60)')

        # Android settings
        parser.add_argument('--android-device', type=str,
                            default=None,
                            help='Identifier of the Android target to use')
        parser.add_argument('--android-home', type=str,
                            default=None,
                            help='Path used to configure ANDROID_HOME')

        # Test customization
        parser.add_argument('--results-dir', type=str,
                            default=self.__class__.__name__,
                            help='Results folder, '
                                 'if specified override test defaults')
        parser.add_argument('--collect', type=str,
                            default=None,
                            help='Set of metrics to collect, '
                                 'e.g. "energy systrace_30" to sample energy and collect a 30s systrace, '
                                 'if specified overrides test defaults')
        parser.add_argument('--iterations', type=int,
                            default=1,
                            help='Number of iterations the same test has to be repeated for (default 1)')
        parser.add_argument('--iterations-pause', type=int,
                            default=30,
                            help='Amount of time (in seconds) to pause for before each iteration (default 30s)')
        parser.add_argument('--iterations-reboot', action="store_true",
                            help='Reboot before each iteration (default False)')

        # Measurements settings
        parser.add_argument('--iio-channel-map', type=str,
                            default=None,
                            help='List of IIO channels to sample, '
                                 'e.g. "ch0:0,ch3:1" to sample CHs 0 and 3, '
                                 'if specified overrides test defaults')

        # Parse command line arguments
        return parser.parse_args()

    def _getBmConf(self):
        """Merge command-line overrides into bm_conf and return it."""
        # Override default configuration with command line parameters
        if self.args.boot_image:
            self.bm_reboot = True
        if self.args.android_device:
            self.bm_conf['device'] = self.args.android_device
        if self.args.android_home:
            self.bm_conf['ANDROID_HOME'] = self.args.android_home
        if self.args.results_dir:
            self.bm_conf['results_dir'] = self.args.results_dir
        if self.args.collect:
            self.bm_collect = self.args.collect
        if self.args.iterations:
            self.bm_iterations = self.args.iterations
        if self.args.iterations_pause:
            self.bm_iterations_pause = self.args.iterations_pause
        if self.args.iterations_reboot:
            self.bm_iterations_reboot = True

        # Override energy meter configuration
        if self.args.iio_channel_map:
            em = {
                'instrument' : 'acme',
                'channel_map' : {},
            }
            for ch in self.args.iio_channel_map.split(','):
                ch_name, ch_id = ch.split(':')
                em['channel_map'][ch_name] = ch_id
            self.bm_conf['emeter'] = em
            self._log.info('Using ACME energy meter channels: %s', em)

        # Override EM if energy collection not required
        # NOTE(review): if bm_collect is still None here (no --collect and no
        # subclass override), the membership test raises TypeError — confirm
        # subclasses always set bm_collect when not passing --collect.
        if 'energy' not in self.bm_collect:
            try:
                self.bm_conf.pop('emeter')
            except:
                pass  # best-effort: no emeter key to remove

        return self.bm_conf

    def _getWorkload(self):
        """Resolve and return the Workload instance named by bm_name."""
        if self.bm_name is None:
            msg = 'Benchmark subclasses must override the `bm_name` attribute'
            raise NotImplementedError(msg)
        # Get a referench to the worload to run
        wl = Workload.getInstance(self.te, self.bm_name)
        if wl is None:
            raise ValueError('Specified benchmark [{}] is not supported'\
                             .format(self.bm_name))
        return wl

    def _getBmParams(self):
        """Return bm_params, enforcing that subclasses defined it."""
        if self.bm_params is None:
            msg = 'Benchmark subclasses must override the `bm_params` attribute'
            raise NotImplementedError(msg)
        return self.bm_params

    def _getBmCollect(self):
        """Return bm_collect, or '' (with a warning) when undefined."""
        if self.bm_collect is None:
            msg = 'Benchmark subclasses must override the `bm_collect` attribute'
            self._log.warning(msg)
            return ''
        return self.bm_collect

    def _preInit(self):
        """
        Code executed before running the benchmark
        """
        # If iterations_reboot is True we are going to reboot before the
        # first iteration anyway.
        if self.bm_reboot and not self.bm_iterations_reboot:
            self.reboot_target()

        self.iterations_count = 1

    def _preRun(self):
        """
        Code executed before every iteration of the benchmark
        """
        rebooted = False

        if self.bm_reboot and self.bm_iterations_reboot:
            rebooted = self.reboot_target()

        if not rebooted and self.iterations_count > 1:
            self._log.info('Waiting {}[s] before executing iteration {}...'\
                           .format(self.bm_iterations_pause, self.iterations_count))
            sleep(self.bm_iterations_pause)

        self.iterations_count += 1

    def __init__(self):
        """
        Set up logging and trigger running experiments
        """
        LisaLogging.setup()
        self._log = logging.getLogger('Benchmark')

        self._log.info('=== CommandLine parsing...')
        self.args = self._parseCommandLine()

        self._log.info('=== TestEnv setup...')
        self.bm_conf = self._getBmConf()
        self.te = TestEnv(self.bm_conf)
        self.target = self.te.target

        self._log.info('=== Initialization...')
        self.wl = self._getWorkload()
        self.out_dir=self.te.res_dir
        try:
            self._preInit()
            self.benchmarkInit()
        except:
            # Re-raised below, so failures are not swallowed.
            self._log.warning('Benchmark initialization failed: execution aborted')
            raise

        self._log.info('=== Execution...')
        for iter_id in range(1, self.bm_iterations+1):
            self._log.info('=== Iteration {}/{}...'.format(iter_id, self.bm_iterations))
            out_dir = os.path.join(self.out_dir, "{:03d}".format(iter_id))
            try:
                os.makedirs(out_dir)
            except: pass  # directory may already exist
            self._preRun()
            self.wl.run(out_dir=out_dir,
                        collect=self._getBmCollect(),
                        **self.bm_params)

        self._log.info('=== Finalization...')
        self.benchmarkFinalize()

    def _wait_for_logcat_idle(self, seconds=1):
        """Block until logcat produces no output for *seconds* seconds."""
        lines = 0

        # Clear logcat
        # os.system('{} logcat -s {} -c'.format(adb, DEVICE));
        self.target.clear_logcat()

        # Dump logcat output
        logcat_cmd = 'adb -s {} logcat'.format(self.target.adb_name)
        logcat = Popen(logcat_cmd, shell=True, stdout=PIPE)
        logcat_poll = select.poll()
        logcat_poll.register(logcat.stdout, select.POLLIN)

        # Monitor logcat until it's idle for the specified number of [s]
        self._log.info('Waiting for system to be almost idle')
        self._log.info('   i.e. at least %d[s] of no logcat messages', seconds)
        while True:
            poll_result = logcat_poll.poll(seconds * 1000)
            if not poll_result:
                break
            lines = lines + 1
            line = logcat.stdout.readline(1024)
            # NOTE(review): `lines % 1000` is truthy for 999 of every 1000
            # lines — likely meant `lines % 1000 == 0`; confirm intent.
            if lines % 1000:
                self._log.debug('   still waiting...')
            if lines > 1e6:
                self._log.warning('device logcat seems quite busy, '
                                  'continuing anyway... ')
                break

    def reboot_target(self, disable_charge=True):
        """
        Reboot the target if a "boot-image" has been specified

        If the user specify a boot-image as a command line parameter, this
        method will reboot the target with the specified kernel and wait
        for the target to be up and running.
        """
        rebooted = False

        # Reboot the device, if a boot_image has been specified
        if self.args.boot_image:

            self._log.warning('=== Rebooting...')
            self._log.warning('Rebooting image to use: %s', self.args.boot_image)

            self._log.debug('Waiting 6[s] to enter bootloader...')
            self.target.adb_reboot_bootloader()
            sleep(6)
            # self._fastboot('boot {}'.format(self.args.boot_image))
            cmd = 'boot {}'.format(self.args.boot_image)
            fastboot_command(cmd, device=self.target.adb_name)
            self._log.debug('Waiting {}[s] for boot to start...'\
                            .format(self.args.boot_timeout))
            sleep(self.args.boot_timeout)
            rebooted = True

        else:
            self._log.warning('Device NOT rebooted, using current image')

        # Restart ADB in root mode
        self._log.warning('Restarting ADB in root mode...')
        self.target.adb_root(force=True)

        # TODO add check for kernel SHA1
        self._log.warning('Skipping kernel SHA1 cross-check...')

        # Disable charge via USB
        if disable_charge:
            self._log.debug('Disabling charge over USB...')
            self.target.charging_enabled = False

        # Log current kernel version
        self._log.info('Running with kernel:')
        self._log.info('   %s', self.target.kernel_version)

        # Wait for the system to complete the boot
        self._wait_for_logcat_idle()

        return rebooted
# vim :set tabstop=4 shiftwidth=4 expandtab
|
apache-2.0
|
573719929/Common-Processor
|
gen-py/hbase/ttypes.py
|
1
|
38818
|
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
  from thrift.protocol import fastbinary
except:
  # Optional C accelerator module; fall back to the pure-Python
  # (de)serialization paths below when it is unavailable.
  fastbinary = None
# Thrift-generated struct (see file header) — regenerate from the IDL
# rather than hand-editing.
class TCell:
  """
  TCell - Used to transport a cell value (byte[]) and the timestamp it was
  stored with together as a result for get and getRow methods. This promotes
  the timestamp of a cell to a first-class value, making it easy to take
  note of temporal data. Cell is used all the way from HStore up to HTable.

  Attributes:
   - value
   - timestamp
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'value', None, None, ), # 1
    (2, TType.I64, 'timestamp', None, None, ), # 2
  )

  def __init__(self, value=None, timestamp=None,):
    self.value = value
    self.timestamp = timestamp

  def read(self, iprot):
    # Fast path: use the fastbinary C accelerator when both the protocol
    # and the transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.timestamp = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path via the fastbinary C accelerator when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TCell')
    if self.value is not None:
      oprot.writeFieldBegin('value', TType.STRING, 1)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.timestamp is not None:
      oprot.writeFieldBegin('timestamp', TType.I64, 2)
      oprot.writeI64(self.timestamp)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
# Thrift-generated struct (see file header) — regenerate from the IDL
# rather than hand-editing.
class ColumnDescriptor:
  """
  An HColumnDescriptor contains information about a column family
  such as the number of versions, compression settings, etc. It is
  used as input when creating a table or adding a column.

  Attributes:
   - name
   - maxVersions
   - compression
   - inMemory
   - bloomFilterType
   - bloomFilterVectorSize
   - bloomFilterNbHashes
   - blockCacheEnabled
   - timeToLive
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.I32, 'maxVersions', None, 3, ), # 2
    (3, TType.STRING, 'compression', None, "NONE", ), # 3
    (4, TType.BOOL, 'inMemory', None, False, ), # 4
    (5, TType.STRING, 'bloomFilterType', None, "NONE", ), # 5
    (6, TType.I32, 'bloomFilterVectorSize', None, 0, ), # 6
    (7, TType.I32, 'bloomFilterNbHashes', None, 0, ), # 7
    (8, TType.BOOL, 'blockCacheEnabled', None, False, ), # 8
    (9, TType.I32, 'timeToLive', None, -1, ), # 9
  )

  # Default argument values are pulled from thrift_spec (index 4 of each
  # field tuple is the IDL default).
  def __init__(self, name=None, maxVersions=thrift_spec[2][4], compression=thrift_spec[3][4], inMemory=thrift_spec[4][4], bloomFilterType=thrift_spec[5][4], bloomFilterVectorSize=thrift_spec[6][4], bloomFilterNbHashes=thrift_spec[7][4], blockCacheEnabled=thrift_spec[8][4], timeToLive=thrift_spec[9][4],):
    self.name = name
    self.maxVersions = maxVersions
    self.compression = compression
    self.inMemory = inMemory
    self.bloomFilterType = bloomFilterType
    self.bloomFilterVectorSize = bloomFilterVectorSize
    self.bloomFilterNbHashes = bloomFilterNbHashes
    self.blockCacheEnabled = blockCacheEnabled
    self.timeToLive = timeToLive

  def read(self, iprot):
    # Fast path: use the fastbinary C accelerator when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.maxVersions = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.compression = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.BOOL:
          self.inMemory = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.STRING:
          self.bloomFilterType = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.bloomFilterVectorSize = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.bloomFilterNbHashes = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.BOOL:
          self.blockCacheEnabled = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.I32:
          self.timeToLive = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path via the fastbinary C accelerator when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ColumnDescriptor')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.maxVersions is not None:
      oprot.writeFieldBegin('maxVersions', TType.I32, 2)
      oprot.writeI32(self.maxVersions)
      oprot.writeFieldEnd()
    if self.compression is not None:
      oprot.writeFieldBegin('compression', TType.STRING, 3)
      oprot.writeString(self.compression)
      oprot.writeFieldEnd()
    if self.inMemory is not None:
      oprot.writeFieldBegin('inMemory', TType.BOOL, 4)
      oprot.writeBool(self.inMemory)
      oprot.writeFieldEnd()
    if self.bloomFilterType is not None:
      oprot.writeFieldBegin('bloomFilterType', TType.STRING, 5)
      oprot.writeString(self.bloomFilterType)
      oprot.writeFieldEnd()
    if self.bloomFilterVectorSize is not None:
      oprot.writeFieldBegin('bloomFilterVectorSize', TType.I32, 6)
      oprot.writeI32(self.bloomFilterVectorSize)
      oprot.writeFieldEnd()
    if self.bloomFilterNbHashes is not None:
      oprot.writeFieldBegin('bloomFilterNbHashes', TType.I32, 7)
      oprot.writeI32(self.bloomFilterNbHashes)
      oprot.writeFieldEnd()
    if self.blockCacheEnabled is not None:
      oprot.writeFieldBegin('blockCacheEnabled', TType.BOOL, 8)
      oprot.writeBool(self.blockCacheEnabled)
      oprot.writeFieldEnd()
    if self.timeToLive is not None:
      oprot.writeFieldBegin('timeToLive', TType.I32, 9)
      oprot.writeI32(self.timeToLive)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
# Thrift-generated struct (see file header) — regenerate from the IDL
# rather than hand-editing.
class TRegionInfo:
  """
  A TRegionInfo contains information about an HTable region.

  Attributes:
   - startKey
   - endKey
   - id
   - name
   - version
   - serverName
   - port
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'startKey', None, None, ), # 1
    (2, TType.STRING, 'endKey', None, None, ), # 2
    (3, TType.I64, 'id', None, None, ), # 3
    (4, TType.STRING, 'name', None, None, ), # 4
    (5, TType.BYTE, 'version', None, None, ), # 5
    (6, TType.STRING, 'serverName', None, None, ), # 6
    (7, TType.I32, 'port', None, None, ), # 7
  )

  def __init__(self, startKey=None, endKey=None, id=None, name=None, version=None, serverName=None, port=None,):
    self.startKey = startKey
    self.endKey = endKey
    self.id = id
    self.name = name
    self.version = version
    self.serverName = serverName
    self.port = port

  def read(self, iprot):
    # Fast path: use the fastbinary C accelerator when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.startKey = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.endKey = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.id = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.BYTE:
          self.version = iprot.readByte();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.serverName = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.I32:
          self.port = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path via the fastbinary C accelerator when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TRegionInfo')
    if self.startKey is not None:
      oprot.writeFieldBegin('startKey', TType.STRING, 1)
      oprot.writeString(self.startKey)
      oprot.writeFieldEnd()
    if self.endKey is not None:
      oprot.writeFieldBegin('endKey', TType.STRING, 2)
      oprot.writeString(self.endKey)
      oprot.writeFieldEnd()
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.I64, 3)
      oprot.writeI64(self.id)
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 4)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.version is not None:
      oprot.writeFieldBegin('version', TType.BYTE, 5)
      oprot.writeByte(self.version)
      oprot.writeFieldEnd()
    if self.serverName is not None:
      oprot.writeFieldBegin('serverName', TType.STRING, 6)
      oprot.writeString(self.serverName)
      oprot.writeFieldEnd()
    if self.port is not None:
      oprot.writeFieldBegin('port', TType.I32, 7)
      oprot.writeI32(self.port)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
# Thrift-generated struct (see file header) — regenerate from the IDL
# rather than hand-editing.
class Mutation:
  """
  A Mutation object is used to either update or delete a column-value.

  Attributes:
   - isDelete
   - column
   - value
   - writeToWAL
  """

  thrift_spec = (
    None, # 0
    (1, TType.BOOL, 'isDelete', None, False, ), # 1
    (2, TType.STRING, 'column', None, None, ), # 2
    (3, TType.STRING, 'value', None, None, ), # 3
    (4, TType.BOOL, 'writeToWAL', None, True, ), # 4
  )

  # Default argument values are pulled from thrift_spec (index 4 of each
  # field tuple is the IDL default).
  def __init__(self, isDelete=thrift_spec[1][4], column=None, value=None, writeToWAL=thrift_spec[4][4],):
    self.isDelete = isDelete
    self.column = column
    self.value = value
    self.writeToWAL = writeToWAL

  def read(self, iprot):
    # Fast path: use the fastbinary C accelerator when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.BOOL:
          self.isDelete = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.column = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.value = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.BOOL:
          self.writeToWAL = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # Fast path via the fastbinary C accelerator when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Mutation')
    if self.isDelete is not None:
      oprot.writeFieldBegin('isDelete', TType.BOOL, 1)
      oprot.writeBool(self.isDelete)
      oprot.writeFieldEnd()
    if self.column is not None:
      oprot.writeFieldBegin('column', TType.STRING, 2)
      oprot.writeString(self.column)
      oprot.writeFieldEnd()
    if self.value is not None:
      oprot.writeFieldBegin('value', TType.STRING, 3)
      oprot.writeString(self.value)
      oprot.writeFieldEnd()
    if self.writeToWAL is not None:
      oprot.writeFieldBegin('writeToWAL', TType.BOOL, 4)
      oprot.writeBool(self.writeToWAL)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class BatchMutation:
  """
  A BatchMutation object is used to apply a number of Mutations to a single row.

  Thrift-generated struct; field ids/types define the wire format.

  Attributes:
   - row: the row key the mutations apply to.
   - mutations: list of Mutation objects to apply to the row.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'row', None, None, ), # 1
    (2, TType.LIST, 'mutations', (TType.STRUCT,(Mutation, Mutation.thrift_spec)), None, ), # 2
  )

  def __init__(self, row=None, mutations=None,):
    self.row = row
    self.mutations = mutations

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Decode a list of Mutation structs element by element.
          self.mutations = []
          (_etype3, _size0) = iprot.readListBegin()
          for _i4 in xrange(_size0):
            _elem5 = Mutation()
            _elem5.read(iprot)
            self.mutations.append(_elem5)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        # Unknown field: skip for forward compatibility.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('BatchMutation')
    if self.row is not None:
      oprot.writeFieldBegin('row', TType.STRING, 1)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.mutations is not None:
      oprot.writeFieldBegin('mutations', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.mutations))
      for iter6 in self.mutations:
        iter6.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TIncrement:
  """
  For increments that are not incrementColumnValue
  equivalents.

  Thrift-generated struct; field ids/types define the wire format.

  Attributes:
   - table: name of the table containing the counter.
   - row: row key of the counter cell.
   - column: column ("family:qualifier") of the counter cell.
   - ammount: 64-bit amount to increment by.  NOTE: "ammount" is misspelled
     in the upstream Thrift IDL; the name is kept as-is for API/wire
     compatibility -- do not "fix" it here.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'table', None, None, ), # 1
    (2, TType.STRING, 'row', None, None, ), # 2
    (3, TType.STRING, 'column', None, None, ), # 3
    (4, TType.I64, 'ammount', None, None, ), # 4
  )

  def __init__(self, table=None, row=None, column=None, ammount=None,):
    self.table = table
    self.row = row
    self.column = column
    self.ammount = ammount

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.table = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.column = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.ammount = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TIncrement')
    if self.table is not None:
      oprot.writeFieldBegin('table', TType.STRING, 1)
      oprot.writeString(self.table)
      oprot.writeFieldEnd()
    if self.row is not None:
      oprot.writeFieldBegin('row', TType.STRING, 2)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.column is not None:
      oprot.writeFieldBegin('column', TType.STRING, 3)
      oprot.writeString(self.column)
      oprot.writeFieldEnd()
    if self.ammount is not None:
      oprot.writeFieldBegin('ammount', TType.I64, 4)
      oprot.writeI64(self.ammount)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TColumn:
  """
  Holds column name and the cell.

  Thrift-generated struct; field ids/types define the wire format.

  Attributes:
   - columnName: column name ("family:qualifier").
   - cell: TCell holding the value and timestamp for this column.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'columnName', None, None, ), # 1
    (2, TType.STRUCT, 'cell', (TCell, TCell.thrift_spec), None, ), # 2
  )

  def __init__(self, columnName=None, cell=None,):
    self.columnName = columnName
    self.cell = cell

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.columnName = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          # Nested struct: delegate decoding to TCell itself.
          self.cell = TCell()
          self.cell.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TColumn')
    if self.columnName is not None:
      oprot.writeFieldBegin('columnName', TType.STRING, 1)
      oprot.writeString(self.columnName)
      oprot.writeFieldEnd()
    if self.cell is not None:
      oprot.writeFieldBegin('cell', TType.STRUCT, 2)
      self.cell.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TRowResult:
  """
  Holds row name and then a map of columns to cells.

  Thrift-generated struct; field ids/types define the wire format.

  Attributes:
   - row: the row key.
   - columns: dict mapping column name -> TCell.
   - sortedColumns: list of TColumn in sorted order (populated instead of
     `columns` when the caller requested sorted results).
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'row', None, None, ), # 1
    (2, TType.MAP, 'columns', (TType.STRING,None,TType.STRUCT,(TCell, TCell.thrift_spec)), None, ), # 2
    (3, TType.LIST, 'sortedColumns', (TType.STRUCT,(TColumn, TColumn.thrift_spec)), None, ), # 3
  )

  def __init__(self, row=None, columns=None, sortedColumns=None,):
    self.row = row
    self.columns = columns
    self.sortedColumns = sortedColumns

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.row = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          # Decode string -> TCell map entry by entry.
          self.columns = {}
          (_ktype8, _vtype9, _size7 ) = iprot.readMapBegin()
          for _i11 in xrange(_size7):
            _key12 = iprot.readString();
            _val13 = TCell()
            _val13.read(iprot)
            self.columns[_key12] = _val13
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          # Decode a list of TColumn structs element by element.
          self.sortedColumns = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in xrange(_size14):
            _elem19 = TColumn()
            _elem19.read(iprot)
            self.sortedColumns.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TRowResult')
    if self.row is not None:
      oprot.writeFieldBegin('row', TType.STRING, 1)
      oprot.writeString(self.row)
      oprot.writeFieldEnd()
    if self.columns is not None:
      oprot.writeFieldBegin('columns', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.columns))
      for kiter20,viter21 in self.columns.items():
        oprot.writeString(kiter20)
        viter21.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.sortedColumns is not None:
      oprot.writeFieldBegin('sortedColumns', TType.LIST, 3)
      oprot.writeListBegin(TType.STRUCT, len(self.sortedColumns))
      for iter22 in self.sortedColumns:
        iter22.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TScan:
  """
  A Scan object is used to specify scanner parameters when opening a scanner.

  Thrift-generated struct; field ids/types define the wire format.

  Attributes:
   - startRow: first row key to scan (inclusive).
   - stopRow: row key at which to stop (exclusive).
   - timestamp: only return cells at or before this timestamp.
   - columns: list of column names to fetch.
   - caching: number of rows for the server to buffer per fetch.
   - filterString: server-side filter expression.
   - sortColumns: bool, whether results use sortedColumns in TRowResult.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'startRow', None, None, ), # 1
    (2, TType.STRING, 'stopRow', None, None, ), # 2
    (3, TType.I64, 'timestamp', None, None, ), # 3
    (4, TType.LIST, 'columns', (TType.STRING,None), None, ), # 4
    (5, TType.I32, 'caching', None, None, ), # 5
    (6, TType.STRING, 'filterString', None, None, ), # 6
    (7, TType.BOOL, 'sortColumns', None, None, ), # 7
  )

  def __init__(self, startRow=None, stopRow=None, timestamp=None, columns=None, caching=None, filterString=None, sortColumns=None,):
    self.startRow = startRow
    self.stopRow = stopRow
    self.timestamp = timestamp
    self.columns = columns
    self.caching = caching
    self.filterString = filterString
    self.sortColumns = sortColumns

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.startRow = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.stopRow = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.timestamp = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          # Decode a list of column-name strings.
          self.columns = []
          (_etype26, _size23) = iprot.readListBegin()
          for _i27 in xrange(_size23):
            _elem28 = iprot.readString();
            self.columns.append(_elem28)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.caching = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.filterString = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.BOOL:
          self.sortColumns = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TScan')
    if self.startRow is not None:
      oprot.writeFieldBegin('startRow', TType.STRING, 1)
      oprot.writeString(self.startRow)
      oprot.writeFieldEnd()
    if self.stopRow is not None:
      oprot.writeFieldBegin('stopRow', TType.STRING, 2)
      oprot.writeString(self.stopRow)
      oprot.writeFieldEnd()
    if self.timestamp is not None:
      oprot.writeFieldBegin('timestamp', TType.I64, 3)
      oprot.writeI64(self.timestamp)
      oprot.writeFieldEnd()
    if self.columns is not None:
      oprot.writeFieldBegin('columns', TType.LIST, 4)
      oprot.writeListBegin(TType.STRING, len(self.columns))
      for iter29 in self.columns:
        oprot.writeString(iter29)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.caching is not None:
      oprot.writeFieldBegin('caching', TType.I32, 5)
      oprot.writeI32(self.caching)
      oprot.writeFieldEnd()
    if self.filterString is not None:
      oprot.writeFieldBegin('filterString', TType.STRING, 6)
      oprot.writeString(self.filterString)
      oprot.writeFieldEnd()
    if self.sortColumns is not None:
      oprot.writeFieldBegin('sortColumns', TType.BOOL, 7)
      oprot.writeBool(self.sortColumns)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class IOError(TException):
  """
  An IOError exception signals that an error occurred communicating
  to the Hbase master or an Hbase region server.  Also used to return
  more general Hbase error conditions.

  NOTE: this generated class shadows Python's builtin IOError inside this
  module; the name comes from the Thrift IDL and is kept for compatibility.

  Attributes:
   - message: human-readable description of the error.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from *iprot*."""
    # Fast path: C decoder for the accelerated binary protocol.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to *oprot*."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('IOError')
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class IllegalArgument(TException):
  """
  An IllegalArgument exception indicates an illegal or invalid
  argument was passed into a procedure.

  Attributes:
   - message: human-readable description of the invalid argument.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    """Populate this exception from the wire via *iprot*."""
    # Use the accelerated C decoder when the binary protocol supports it.
    use_fast = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None
                and fastbinary is not None)
    if use_fast:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      field_name, field_type, field_id = iprot.readFieldBegin()
      if field_type == TType.STOP:
        break
      if field_id == 1 and field_type == TType.STRING:
        self.message = iprot.readString()
      else:
        # Unknown field or unexpected type: skip for forward compatibility.
        iprot.skip(field_type)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to *oprot*; a None message is omitted."""
    use_fast = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and self.thrift_spec is not None
                and fastbinary is not None)
    if use_fast:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('IllegalArgument')
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    attrs = ', '.join('%s=%r' % item for item in self.__dict__.iteritems())
    return '%s(%s)' % (self.__class__.__name__, attrs)

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not self.__eq__(other)
class AlreadyExists(TException):
  """
  An AlreadyExists exceptions signals that a table with the specified
  name already exists.

  Attributes:
   - message: human-readable description of the conflict.
  """

  # Per-field descriptors: (field id, thrift type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'message', None, None, ), # 1
  )

  def __init__(self, message=None,):
    self.message = message

  def read(self, iprot):
    """Populate this exception from the wire via *iprot*."""
    # Use the accelerated C decoder when the binary protocol supports it.
    use_fast = (iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and isinstance(iprot.trans, TTransport.CReadableTransport)
                and self.thrift_spec is not None
                and fastbinary is not None)
    if use_fast:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      field_name, field_type, field_id = iprot.readFieldBegin()
      if field_type == TType.STOP:
        break
      if field_id == 1 and field_type == TType.STRING:
        self.message = iprot.readString()
      else:
        # Unknown field or unexpected type: skip for forward compatibility.
        iprot.skip(field_type)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to *oprot*; a None message is omitted."""
    use_fast = (oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated
                and self.thrift_spec is not None
                and fastbinary is not None)
    if use_fast:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('AlreadyExists')
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 1)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to enforce.
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    attrs = ', '.join('%s=%r' % item for item in self.__dict__.iteritems())
    return '%s(%s)' % (self.__class__.__name__, attrs)

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not self.__eq__(other)
|
apache-2.0
|
Workday/OpenFrame
|
tools/telemetry/third_party/gsutilz/third_party/apitools/apitools/base/py/testing/mock.py
|
22
|
11755
|
"""The mock module allows easy mocking of apitools clients.
This module allows you to mock out the constructor of a particular apitools
client, for a specific API and version. Then, when the client is created, it
will be run against an expected session that you define. This way code that is
not aware of the testing framework can construct new clients as normal, as long
as it's all done within the context of a mock.
"""
import difflib
from protorpc import messages
import six
import apitools.base.py as apitools_base
class Error(Exception):
    """Base class for all exceptions raised by this mock module."""
def _MessagesEqual(msg1, msg2):
"""Compare two protorpc messages for equality.
Using python's == operator does not work in all cases, specifically when
there is a list involved.
Args:
msg1: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
msg2: protorpc.messages.Message or [protorpc.messages.Message] or number
or string, One of the messages to compare.
Returns:
If the messages are isomorphic.
"""
if isinstance(msg1, list) and isinstance(msg2, list):
if len(msg1) != len(msg2):
return False
return all(_MessagesEqual(x, y) for x, y in zip(msg1, msg2))
if (not isinstance(msg1, messages.Message) or
not isinstance(msg2, messages.Message)):
return msg1 == msg2
for field in msg1.all_fields():
field1 = getattr(msg1, field.name)
field2 = getattr(msg2, field.name)
if not _MessagesEqual(field1, field2):
return False
return True
class UnexpectedRequestException(Error):

    """Raised when a mocked method receives a call it was not expecting."""

    def __init__(self, received_call, expected_call):
        expected_key, expected_request = expected_call
        received_key, received_request = received_call

        expected_repr = apitools_base.MessageToRepr(
            expected_request, multiline=True)
        received_repr = apitools_base.MessageToRepr(
            received_request, multiline=True)

        # Unified diff of the two reprs, used when only the payload differs.
        diff = '\n'.join(difflib.unified_diff(
            expected_repr.splitlines(), received_repr.splitlines()))

        if expected_key != received_key:
            # A different method was called entirely.
            msg = ('expected: {expected_key}({expected_request})\n'
                   'received: {received_key}({received_request})\n').format(
                       expected_key=expected_key,
                       expected_request=expected_repr,
                       received_key=received_key,
                       received_request=received_repr)
        else:
            # Same method, different request payload; include the diff.
            msg = ('for request to {key},\n'
                   'expected: {expected_request}\n'
                   'received: {received_request}\n'
                   'diff: {diff}\n').format(
                       key=expected_key,
                       expected_request=expected_repr,
                       received_request=received_repr,
                       diff=diff)
        super(UnexpectedRequestException, self).__init__(msg)
class ExpectedRequestsException(Error):

    """Raised at Unmock time when expected requests were never issued."""

    def __init__(self, expected_calls):
        # Build the same "expected:" report as before, one line per
        # outstanding (key, request) expectation.
        parts = ['expected:\n']
        for key, request in expected_calls:
            parts.append('{key}({request})\n'.format(
                key=key,
                request=apitools_base.MessageToRepr(request, multiline=True)))
        super(ExpectedRequestsException, self).__init__(''.join(parts))
class _ExpectedRequestResponse(object):
"""Encapsulation of an expected request and corresponding response."""
def __init__(self, key, request, response=None, exception=None):
self.__key = key
self.__request = request
if response and exception:
raise apitools_base.ConfigurationValueError(
'Should specify at most one of response and exception')
if response and isinstance(response, apitools_base.Error):
raise apitools_base.ConfigurationValueError(
'Responses should not be an instance of Error')
if exception and not isinstance(exception, apitools_base.Error):
raise apitools_base.ConfigurationValueError(
'Exceptions must be instances of Error')
self.__response = response
self.__exception = exception
@property
def key(self):
return self.__key
@property
def request(self):
return self.__request
def ValidateAndRespond(self, key, request):
"""Validate that key and request match expectations, and respond if so.
Args:
key: str, Actual key to compare against expectations.
request: protorpc.messages.Message or [protorpc.messages.Message]
or number or string, Actual request to compare againt expectations
Raises:
UnexpectedRequestException: If key or request dont match
expectations.
apitools_base.Error: If a non-None exception is specified to
be thrown.
Returns:
The response that was specified to be returned.
"""
if key != self.__key or not _MessagesEqual(request, self.__request):
raise UnexpectedRequestException((key, request),
(self.__key, self.__request))
if self.__exception:
# Can only throw apitools_base.Error.
raise self.__exception # pylint: disable=raising-bad-type
return self.__response
class _MockedService(apitools_base.BaseApiService):

    """Service wrapper whose methods are replaced with _MockedMethod stubs.

    Args:
      key: str, '<api>.<collection>' prefix used to label each method.
      mocked_client: Client, the mock client owning the expectation queue.
      methods: iterable of str, names of service methods to stub out.
      real_service: BaseApiService or None; when given, its state is copied
          and its methods serve as fallbacks for unmocked (None) responses.
    """

    def __init__(self, key, mocked_client, methods, real_service):
        super(_MockedService, self).__init__(mocked_client)
        # Bug fix: the original dereferenced real_service.__dict__
        # unconditionally, crashing when real_service was None even though
        # the loop below explicitly supports that case.  Guard it.
        if real_service:
            self.__dict__.update(real_service.__dict__)
        for method in methods:
            real_method = None
            if real_service:
                real_method = getattr(real_service, method)
            setattr(self, method,
                    _MockedMethod(key + '.' + method,
                                  mocked_client,
                                  real_method))
class _MockedMethod(object):
"""A mocked API service method."""
def __init__(self, key, mocked_client, real_method):
self.__key = key
self.__mocked_client = mocked_client
self.__real_method = real_method
def Expect(self, request, response=None, exception=None, **unused_kwargs):
"""Add an expectation on the mocked method.
Exactly one of response and exception should be specified.
Args:
request: The request that should be expected
response: The response that should be returned or None if
exception is provided.
exception: An exception that should be thrown, or None.
"""
# TODO(jasmuth): the unused_kwargs provides a placeholder for
# future things that can be passed to Expect(), like special
# params to the method call.
# pylint: disable=protected-access
# Class in same module.
self.__mocked_client._request_responses.append(
_ExpectedRequestResponse(self.__key,
request,
response=response,
exception=exception))
# pylint: enable=protected-access
def __call__(self, request, **unused_kwargs):
# TODO(jasmuth): allow the testing code to expect certain
# values in these currently unused_kwargs, especially the
# upload parameter used by media-heavy services like bigquery
# or bigstore.
# pylint: disable=protected-access
# Class in same module.
if self.__mocked_client._request_responses:
request_response = self.__mocked_client._request_responses.pop(0)
else:
raise UnexpectedRequestException(
(self.__key, request), (None, None))
# pylint: enable=protected-access
response = request_response.ValidateAndRespond(self.__key, request)
if response is None and self.__real_method:
response = self.__real_method(request)
print(apitools_base.MessageToRepr(
response, multiline=True, shortstrings=True))
return response
return response
def _MakeMockedServiceConstructor(mocked_service):
def Constructor(unused_self, unused_client):
return mocked_service
return Constructor
class Client(object):

    """Mock an apitools client."""

    def __init__(self, client_class, real_client=None):
        """Mock an apitools API, given its class.

        Args:
          client_class: The class for the API. eg, if you
                from apis.sqladmin import v1beta3
              then you can pass v1beta3.SqladminV1beta3 to this class
              and anything within its context will use your mocked
              version.
          real_client: apitools Client, The client to make requests
              against when the expected response is None.
        """
        if not real_client:
            real_client = client_class(get_credentials=False)

        self.__client_class = client_class
        # Original service classes saved by Mock() so Unmock() can restore.
        self.__real_service_classes = {}
        self.__real_client = real_client

        # FIFO queue of _ExpectedRequestResponse, appended via Expect().
        self._request_responses = []

    def __enter__(self):
        return self.Mock()

    def Mock(self):
        """Stub out the client class with mocked services."""
        client = self.__real_client or self.__client_class(
            get_credentials=False)
        for name in dir(self.__client_class):
            service_class = getattr(self.__client_class, name)
            if not isinstance(service_class, type):
                continue
            if not issubclass(service_class, apitools_base.BaseApiService):
                continue
            self.__real_service_classes[name] = service_class
            service = service_class(client)
            # pylint: disable=protected-access
            # Some liberty is allowed with mocking.
            collection_name = service_class._NAME
            # pylint: enable=protected-access
            api_name = '%s_%s' % (self.__client_class._PACKAGE,
                                  self.__client_class._URL_VERSION)
            mocked_service = _MockedService(
                api_name + '.' + collection_name, self,
                service._method_configs.keys(),
                service if self.__real_client else None)
            mocked_constructor = _MakeMockedServiceConstructor(mocked_service)
            setattr(self.__client_class, name, mocked_constructor)

            setattr(self, collection_name, mocked_service)

        self.__real_include_fields = self.__client_class.IncludeFields
        self.__client_class.IncludeFields = self.IncludeFields

        return self

    def __exit__(self, exc_type, value, traceback):
        self.Unmock()
        if value:
            six.reraise(exc_type, value, traceback)
        return True

    def Unmock(self):
        """Restore the real client class and verify expectations were met.

        Raises:
          ExpectedRequestsException: if requests registered via Expect()
              were never issued.
        """
        for name, service_class in self.__real_service_classes.items():
            setattr(self.__client_class, name, service_class)
        # Bug fix: restore IncludeFields *before* raising for leftover
        # expectations.  The original raised first and skipped this line,
        # leaving the client class permanently patched after a failed
        # verification.
        self.__client_class.IncludeFields = self.__real_include_fields
        if self._request_responses:
            raise ExpectedRequestsException(
                [(rq_rs.key, rq_rs.request) for rq_rs
                 in self._request_responses])

    def IncludeFields(self, include_fields):
        # Delegate to the saved real implementation when a real client exists;
        # otherwise the call is a no-op (returns None).
        if self.__real_client:
            return self.__real_include_fields(self.__real_client,
                                              include_fields)
|
bsd-3-clause
|
kaplun/inspire-next
|
inspirehep/modules/theme/ext.py
|
2
|
4001
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Invenio standard theme."""
from __future__ import absolute_import, division, print_function
from flask_breadcrumbs import Breadcrumbs
from flask_login import user_logged_in
from flask_menu import Menu
from .views import blueprint, unauthorized, insufficient_permissions, \
page_not_found, internal_error
from pkg_resources import resource_filename
from inspirehep.modules.records.permissions import load_user_collections
class INSPIRETheme(object):
    """Invenio theme extension.

    Wires the INSPIRE theme into a Flask application: menu, breadcrumbs,
    blueprint, Jinja2 options, HTTP error handlers, Gravatar, and the
    external-links knowledge base.
    """

    def __init__(self, app=None, **kwargs):
        """Extension initialization."""
        self.menu_ext = Menu()
        self.menu = None
        self.breadcrumbs = Breadcrumbs()
        # Mapping of external-link keys to URL templates, loaded once from
        # the bundled weblinks knowledge base file.
        self.weblinks = self.init_weblinks_dictionary()
        if app:
            # Standard Flask extension pattern: allow both INSPIRETheme(app)
            # and deferred init via init_app(app).
            self.init_app(app, **kwargs)
            self.setup_app(app)

    def init_app(self, app, assets=None, **kwargs):
        """Initialize application object."""
        self.init_config(app.config)
        # Initialize extensions
        self.menu_ext.init_app(app)
        self.menu = app.extensions['menu']
        self.breadcrumbs.init_app(app)
        app.register_blueprint(blueprint)
        # Configure Jinja2 environment.
        app.jinja_env.add_extension('jinja2.ext.do')
        app.jinja_env.lstrip_blocks = True
        app.jinja_env.trim_blocks = True
        # Register errors handlers.
        app.register_error_handler(401, unauthorized)
        app.register_error_handler(403, insufficient_permissions)
        app.register_error_handler(404, page_not_found)
        app.register_error_handler(500, internal_error)
        # Refresh the user's collection permissions on every login.
        user_logged_in.connect(load_user_collections, app)
        # Save reference to self on object
        app.extensions['inspire-theme'] = self

    def init_config(self, config):
        """Initialize configuration."""
        from .bundles import js
        # Set JS bundles to exclude for purpose of avoiding double jQuery etc.
        # when other modules are building their JS bundles.
        config.setdefault("THEME_BASE_BUNDLES_EXCLUDE_JS", [js])
        config.setdefault("BASE_TEMPLATE", "inspirehep_theme/page.html")

    def setup_app(self, app):
        """Initialize Gravatar extension."""
        from flask_gravatar import Gravatar
        gravatar = Gravatar(app,
                            size=app.config.get('GRAVATAR_SIZE', 100),
                            rating=app.config.get('GRAVATAR_RATING', 'g'),
                            default=app.config.get(
                                'GRAVATAR_DEFAULT', 'retro'),
                            force_default=False,
                            force_lower=False)
        # Gravatar registers itself on the app; the local reference is not
        # needed afterwards.
        del gravatar
        return app

    def init_weblinks_dictionary(self):
        """Initialize the dictionary for HEP detailed record external links."""
        weblinks_dictionary = {}
        # Each knowledge-base line has the form "<key>---<url template>".
        # NOTE(review): a line with zero or multiple '---' separators would
        # raise ValueError here -- presumably the KB file is well-formed.
        with open(resource_filename('inspirehep', 'kbs/weblinks.kb'), 'r') as kb_file:
            for line in kb_file:
                (key, val) = line.split('---')
                weblinks_dictionary[key] = val
        return weblinks_dictionary
|
gpl-3.0
|
jmighion/ansible
|
lib/ansible/modules/windows/win_scheduled_task.py
|
21
|
17716
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Ansible metadata: maturity of the module and its support channel.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
DOCUMENTATION = r'''
---
module: win_scheduled_task
version_added: "2.0"
short_description: Manage scheduled tasks
description:
- Creates, modifies, or removes Windows scheduled tasks.
notes:
- In Ansible 2.4 and earlier, this could only be run on Server 2012/Windows 8
or newer. Since 2.5 this restriction has been lifted.
- The option names and structure for actions and triggers of a service follow
the C(RegisteredTask) naming standard and requirements, it would be useful to
read up on this guide if coming across any issues U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa382542.aspx).
options:
# module definition options
name:
description: The name of the scheduled task without the path.
required: true
path:
description:
- Task folder in which this task will be stored.
- Will create the folder when C(state=present) and the folder does not
already exist.
- Will remove the folder when C(state=absent) and there are no tasks left
in the folder.
default: \
state:
description:
- When C(state=present) will ensure the task exists.
- When C(state=absent) will ensure the task does not exist.
choices: [ absent, present ]
default: present
# Action options
actions:
description:
- A list of action to configure for the task.
- See suboptions for details on how to construct each list entry.
- When creating a task there MUST be at least one action but when deleting
a task this can be a null or an empty list.
- The ordering of this list is important, the module will ensure the order
is kept when modifying the task.
- This module only supports the C(ExecAction) type but can still delete the
older legacy types.
suboptions:
path:
description:
- The path to the executable for the ExecAction.
required: true
arguments:
description:
- An argument string to supply for the executable.
working_directory:
description:
- The working directory to run the executable from.
version_added: '2.5'
arguments:
description:
- Arguments to provide for a scheduled task action.
- DEPRECATED since 2.5, use the C(actions) option instead to specify a list
of actions to run.
- Will be removed in 2.7.
aliases: [ argument ]
executable:
description:
- The path to the executable to run for a scheduled task action.
- DEPRECATED since 2.5, use the C(actions) option instead to specify a list
of actions to run.
- Will be removed in 2.7.
# Trigger options
triggers:
description:
- A list of triggers to configure for the task.
- See suboptions for details on how to construct each list entry.
- The ordering of this list is important, the module will ensure the order
is kept when modifying the task.
- There are multiple types of triggers, see U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa383868.aspx)
for a list of trigger types and their options.
- The suboption options listed below are not required for all trigger
types, read the description for more details.
suboptions:
type:
description:
- The trigger type, this value controls what below options are
required.
required: true
choices: [ boot, daily, event, idle, logon, monthlydow, monthly, registration, time, weekly, session_state_change ]
enabled:
description:
- Whether to set the trigger to enabled or disabled
- Used in all trigger types.
type: bool
start_boundary:
description:
- The start time for the task, even if the trigger meets the other
start criteria, it won't start until this time is met.
- If you wish to run a task at 9am on a day you still need to specify
the date on which the trigger is activated, you can set any date even
ones in the past.
- Required when C(type) is C(daily), C(monthlydow), C(monthly),
C(time), C(weekly), (session_state_change).
- Optional for the rest of the trigger types.
- This is in ISO 8601 DateTime format C(YYYY-MM-DDThh:mm:ss).
end_boundary:
description:
- The end time for when the trigger is deactivated.
- This is in ISO 8601 DateTime format C(YYYY-MM-DDThh:mm:ss).
execution_time_limit:
description:
- The maximum amount of time that the task is allowed to run for.
- Optional for all the trigger types.
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
delay:
description:
- The time to delay the task from running once the trigger has been
fired.
- Optional when C(type) is C(boot), C(event), C(logon),
C(registration), C(session_state_change).
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
random_delay:
description:
- The delay time that is randomly added to the start time of the
trigger.
- Optional when C(type) is C(daily), C(monthlydow), C(monthly),
C(time), C(weekly).
- Is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
subscription:
description:
- Only used and is required for C(type=event).
- The XML query string that identifies the event that fires the
trigger.
user_id:
description:
- The username that the trigger will target.
- Optional when C(type) is C(logon), C(session_state_change).
- Can be the username or SID of a user.
- When C(type=logon) and you want the trigger to fire when a user in a
group logs on, leave this as null and set C(group) to the group you
wish to trigger.
days_of_week:
description:
- The days of the week for the trigger.
- Can be a list or comma separated string of full day names e.g. monday
instead of mon.
- Required when C(type) is C(weekly), C(type=session_state_change).
- Optional when C(type=monthlydow).
days_of_month:
description:
- The days of the month from 1 to 31 for the triggers.
- If you wish to set the trigger for the last day of any month
use C(run_on_last_day_of_month).
- Can be a list or comma separated string of day numbers.
- Required when C(type=monthly).
weeks_of_month:
description:
- The weeks of the month for the trigger.
- Can be a list or comma separated string of the numbers 1 to 4
representing the first to 4th week of the month.
- Optional when C(type=monthlydow).
months_of_year:
description:
- The months of the year for the trigger.
- Can be a list or comma separated string of full month names e.g.
march instead of mar.
- Optional when C(type) is C(monthlydow), C(monthly).
run_on_last_week_of_month:
description:
- Boolean value that sets whether the task runs on the last week of the
month.
- Optional when C(type) is C(monthlydow).
type: bool
run_on_last_day_of_month:
description:
- Boolean value that sets whether the task runs on the last day of the
month.
- Optional when C(type) is C(monthly).
type: bool
weeks_interval:
description:
- The interval of weeks to run on, e.g. C(1) means every week while
C(2) means every other week.
- Optional when C(type=weekly).
version_added: '2.5'
days_of_week:
description:
- Days of the week to run a weekly task.
- Specify a list or comma separate days in the full version, e.g. monday
instead of mon.
- DEPRECATED since 2.5, use the C(triggers) option list with the type of
C(monthlydow) or C(weekly).
- Will be removed in 2.7.
frequency:
description:
- The frequency of the task to run.
- DEPRECATED since 2.5, use the C(triggers) option list and specify the
type based on the frequency required.
- Will be removed in 2.7.
choices: [ daily, once, weekly ]
time:
description:
- The start time to execute the scheduled task.
- DEPRECATED since 2.5, use the C(triggers) option list and use the
C(start_boundary) option to set the start time.
- Will be removed in 2.7.
# Principal options
display_name:
description:
- The name of the user/group that is displayed in the Task Scheduler UI.
version_added: '2.5'
group:
description:
- The group that will run the task.
- C(group) and C(username) are exclusive to each other and cannot be set
at the same time.
- C(logon_type) can either be not set or equal C(group).
version_added: '2.5'
logon_type:
description:
- The logon method that the task will run with.
- C(password) means the password will be stored and the task has access
to network resources.
- C(s4u) means the existing token will be used to run the task and no
password will be stored with the task. Means no network or encrypted
files access.
- C(interactive_token) means the user must already be logged on
interactively and will run in an existing interactive session.
- C(group) means that the task will run as a group.
- C(service_account) means that a service account like System, Local
Service or Network Service will run the task.
choices: [ none, password, s4u, interactive_token, group, service_account, token_or_password ]
version_added: '2.5'
run_level:
description:
- The level of user rights used to run the task.
- If not specified the task will be created with limited rights.
choices: [ limited, highest ]
version_added: '2.4'
aliases: [ runlevel ]
username:
description:
- The user to run the scheduled task as.
- Will default to the current user under an interactive token if not
specified during creation.
aliases: [ user ]
password:
description:
- The password for the user account to run the scheduled task as.
- This is required when running a task without the user being logged in,
excluding the builtin service accounts.
- If set, will always result in a change unless C(update_password) is set
to C(no) and no other changes are required for the service.
version_added: '2.4'
update_password:
description:
- Whether to update the password even when no other changes have occurred.
- When C(yes) will always result in a change when executing the module.
type: bool
default: 'yes'
version_added: '2.5'
store_password:
description:
- Whether to store the password for the user running the task.
- If C(no), the task will only have access to local resources.
- DEPRECATED since 2.5, use C(logon_type=password) to set whether to store
the password for the task.
- Will be removed in 2.7.
type: bool
default: 'yes'
version_added: '2.4'
# RegistrationInfo options
author:
description:
- The author of the task.
version_added: '2.5'
date:
description:
- The date when the task was registered.
version_added: '2.5'
description:
description:
- The description of the task.
version_added: '2.5'
source:
description:
- The source of the task.
version_added: '2.5'
version:
description:
- The version number of the task.
version_added: '2.5'
# Settings options
allow_demand_start:
description:
- Whether the task can be started by using either the Run command or the
Context menu.
type: bool
version_added: '2.5'
allow_hard_terminate:
description:
- Whether the task can be terminated by using TerminateProcess.
type: bool
version_added: '2.5'
compatibility:
description:
- The integer value with indicates which version of Task Scheduler a task
is compatible with.
- C(0) means the task is compatible with the AT command.
- C(1) means the task is compatible with Task Scheduler 1.0.
- C(2) means the task is compatible with Task Scheduler 2.0.
choices: [ 0, 1, 2 ]
version_added: '2.5'
delete_expired_task_after:
description:
- The amount of time that the Task Scheduler will wait before deleting the
task after it expires.
- A task expires after the end_boundary has been exceeded for all triggers
associated with the task.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
disallow_start_if_on_batteries:
description:
- Whether the task will not be started if the computer is running on
battery power.
type: bool
version_added: '2.5'
enabled:
description:
- Whether the task is enabled, the task can only run when C(yes).
type: bool
version_added: '2.5'
execution_time_limit:
description:
- The amount of time allowed to complete the task.
- When not set, the time limit is infinite.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
hidden:
description:
- Whether the task will be hidden in the UI.
type: bool
version_added: '2.5'
multiple_instances:
description:
- An integer that indicates the behaviour when starting a task that is
already running.
- C(0) will start a new instance in parallel with existing instances of
that task.
- C(1) will wait until other instances of that task to finish running
before starting itself.
- C(2) will not start a new instance if another is running.
- C(3) will stop other instances of the task and start the new one.
choices: [ 0, 1, 2, 3 ]
version_added: '2.5'
priority:
description:
- The priority level (0-10) of the task.
- When creating a new task the default if C(7).
- See U(https://msdn.microsoft.com/en-us/library/windows/desktop/aa383512.aspx)
for details on the priority levels.
version_added: '2.5'
restart_count:
description:
- The number of times that the Task Scheduler will attempt to restart the
task.
version_added: '2.5'
restart_interval:
description:
- How long the Task Scheduler will attempt to restart the task.
- If this is set then C(restart_count) must also be set.
- The maximum allowed time is 31 days.
- The minimum allowed time is 1 minute.
- This is in the ISO 8601 Duration format C(P[n]Y[n]M[n]DT[n]H[n]M[n]S).
version_added: '2.5'
run_only_if_idle:
description:
- Whether the task will run the task only if the computer is in an idle
state.
type: bool
version_added: '2.5'
run_only_if_network_available:
description:
- Whether the task will run only when a network is available.
type: bool
version_added: '2.5'
start_when_available:
description:
- Whether the task can start at any time after its scheduled time has
passed.
type: bool
version_added: '2.5'
stop_if_going_on_batteries:
description:
- Whether the task will be stopped if the computer begins to run on battery
power.
type: bool
version_added: '2.5'
wake_to_run:
description:
- Whether the task will wake the computer when it is time to run the task.
type: bool
version_added: '2.5'
author:
- Peter Mounce (@petemounce)
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
- name: create a task to open 2 command prompts as SYSTEM
win_scheduled_task:
name: TaskName
description: open command prompt
actions:
- path: cmd.exe
arguments: /c hostname
- path: cmd.exe
arguments: /c whoami
triggers:
- type: daily
start_boundary: 2017-10-09T09:00:00
username: SYSTEM
state: present
enabled: yes
- name: create task to run a PS script as NETWORK service on boot
win_scheduled_task:
name: TaskName2
description: Run a PowerShell script
actions:
- path: C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe
arguments: -ExecutionPolicy Unrestricted -NonInteractive -File C:\TestDir\Test.ps1
triggers:
- type: boot
username: NETWORK SERVICE
run_level: highest
state: present
- name: change above task to run under a domain user account, storing the passwords
win_scheduled_task:
name: TaskName2
username: DOMAIN\User
password: Password
logon_type: password
- name: change the above task again, choosing not to store the password
win_scheduled_task:
name: TaskName2
username: DOMAIN\User
logon_type: s4u
- name: create task with multiple triggers
win_scheduled_task:
name: TriggerTask
path: \Custom
actions:
- path: cmd.exe
triggers:
- type: daily
- type: monthlydow
username: SYSTEM
- name: set logon type to password but don't force update the password
win_scheduled_task:
name: TriggerTask
path: \Custom
actions:
- path: cmd.exe
username: Administrator
password: password
update_password: no
- name: disable a task that already exists
win_scheduled_task:
name: TaskToDisable
enabled: no
'''
RETURN = r'''
'''
|
gpl-3.0
|
luogangyi/Ceilometer-oVirt
|
build/lib/ceilometer/network/statistics/opendaylight/client.py
|
4
|
6462
|
#
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo.config import cfg
import requests
from requests import auth
import six
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
CONF = cfg.CONF
CONF.import_opt('http_timeout', 'ceilometer.service')
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class _Base():
    """Common plumbing shared by the OpenDaylight REST API clients."""

    def __init__(self, client):
        self.client = client

    @abc.abstractproperty
    def base_url(self):
        """Return the URL fragment this API is rooted at."""

    def request(self, path, container_name):
        # Delegate to the owning Client, which knows the endpoint and the
        # authentication parameters.
        full_path = self.base_url + path
        return self.client.request(full_path, container_name)
class OpenDaylightRESTAPIFailed(Exception):
    """Raised when the OpenDaylight controller returns a non-2xx response."""
class StatisticsAPIClient(_Base):
    """Client for the OpenDaylight statistics REST API.

    Base URL:
      {endpoint}/statistics/{containerName}
    """
    base_url = '/statistics/%(container_name)s'

    def get_port_statistics(self, container_name):
        """Fetch per-port statistics ({Base URL}/port)."""
        return self.request('/port', container_name)

    def get_flow_statistics(self, container_name):
        """Fetch per-flow statistics ({Base URL}/flow)."""
        return self.request('/flow', container_name)

    def get_table_statistics(self, container_name):
        """Fetch per-table statistics ({Base URL}/table)."""
        return self.request('/table', container_name)
class TopologyAPIClient(_Base):
    """Client for the OpenDaylight topology REST API.

    Base URL:
      {endpoint}/topology/{containerName}
    """
    base_url = '/topology/%(container_name)s'

    def get_topology(self, container_name):
        """Fetch the container topology ({Base URL})."""
        return self.request('', container_name)

    def get_user_links(self, container_name):
        """Fetch user-configured links ({Base URL}/userLinks)."""
        return self.request('/userLinks', container_name)
class SwitchManagerAPIClient(_Base):
    """Client for the OpenDaylight switch manager REST API.

    Base URL:
      {endpoint}/switchmanager/{containerName}
    """
    base_url = '/switchmanager/%(container_name)s'

    def get_nodes(self, container_name):
        """Fetch node information ({Base URL}/nodes)."""
        return self.request('/nodes', container_name)
class HostTrackerAPIClient(_Base):
    """Client for the OpenDaylight host tracker REST API.

    Base URL:
      {endpoint}/hosttracker/{containerName}
    """
    base_url = '/hosttracker/%(container_name)s'

    def get_active_hosts(self, container_name):
        """Fetch active host information ({Base URL}/hosts/active)."""
        return self.request('/hosts/active', container_name)

    def get_inactive_hosts(self, container_name):
        """Fetch inactive host information ({Base URL}/hosts/inactive)."""
        return self.request('/hosts/inactive', container_name)
class Client():
    """Thin HTTP client for the OpenDaylight controller REST APIs."""

    def __init__(self, endpoint, params):
        """Create a client rooted at *endpoint*.

        :param endpoint: base URL of the controller REST interface
        :param params: dict of connection options; recognised keys are
                       'auth' ('basic' or 'digest'), 'user' and 'password'
        """
        self.statistics = StatisticsAPIClient(self)
        self.topology = TopologyAPIClient(self)
        self.switch_manager = SwitchManagerAPIClient(self)
        self.host_tracker = HostTrackerAPIClient(self)
        self._endpoint = endpoint
        self._req_params = self._get_req_params(params)

    @staticmethod
    def _get_req_params(params):
        """Build the keyword arguments passed to requests.get()."""
        req_params = {
            'headers': {
                'Accept': 'application/json'
            },
            'timeout': CONF.http_timeout,
        }
        auth_way = params.get('auth')
        if auth_way in ['basic', 'digest']:
            user = params.get('user')
            password = params.get('password')
            if auth_way == 'basic':
                auth_class = auth.HTTPBasicAuth
            else:
                auth_class = auth.HTTPDigestAuth
            req_params['auth'] = auth_class(user, password)
        return req_params

    def _log_req(self, url):
        """Log the outgoing request as an equivalent curl command line."""
        curl_command = ['REQ: curl -i -X GET ', '"%s" ' % (url)]
        if 'auth' in self._req_params:
            auth_class = self._req_params['auth']
            if isinstance(auth_class, auth.HTTPBasicAuth):
                curl_command.append('--basic ')
            else:
                curl_command.append('--digest ')
            curl_command.append('--user "%s":"%s" ' % (auth_class.username,
                                                       auth_class.password))
        for name, value in six.iteritems(self._req_params['headers']):
            curl_command.append('-H "%s: %s" ' % (name, value))
        LOG.debug(''.join(curl_command))

    @staticmethod
    def _log_res(resp):
        """Log the response status line, headers and body."""
        dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
                                                  resp.status_code,
                                                  resp.reason)]
        dump.extend('%s: %s\n' % (k, v)
                    for k, v in six.iteritems(resp.headers))
        dump.append('\n')
        if resp.content:
            dump.extend([resp.content, '\n'])
        LOG.debug(''.join(dump))

    def _http_request(self, url):
        """GET *url* and return the decoded JSON body.

        :raises OpenDaylightRESTAPIFailed: on any non-2xx response
        """
        if CONF.debug:
            self._log_req(url)
        resp = requests.get(url, **self._req_params)
        if CONF.debug:
            self._log_res(resp)
        # Floor division so the "is it 2xx" check also works under Python 3,
        # where '/' on two ints yields a float (2.04 != 2).
        if resp.status_code // 100 != 2:
            # Fixed message typo: was 'OpenDaylitght'.
            raise OpenDaylightRESTAPIFailed(
                _('OpenDaylight API returned %(status)s %(reason)s') %
                {'status': resp.status_code, 'reason': resp.reason})
        return resp.json()

    def request(self, path, container_name):
        """Issue a GET for *path* (with container substituted) and decode it."""
        url = self._endpoint + path % {'container_name': container_name}
        return self._http_request(url)
|
apache-2.0
|
colinligertwood/odoo
|
addons/web/tests/test_menu.py
|
34
|
5729
|
# -*- coding: utf-8 -*-
import collections
import mock
import unittest2
from openerp.http import request as req
from . import common
from ..controllers import main
class Placeholder(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        # items() instead of the Python-2-only iteritems(): iteration
        # behaviour is identical on Python 2 and it also runs on Python 3.
        for name, value in kwargs.items():
            setattr(self, name, value)
class LoadTest(common.MockRequestCase):
    """Tests for ``main.Menu.load()``: menu records fetched through the
    mocked request session must be assembled into a nested 'children'
    tree, sorted by each record's 'sequence' field."""

    def setUp(self):
        super(LoadTest, self).setUp()
        self.menu = main.Menu()

        # Have self.request.session.model() return a different mock object for
        # each model (but always the same mock for a given model name)
        models = collections.defaultdict(mock.Mock)
        model = req.session.model.side_effect = \
            lambda model_name: models[model_name]

        self.MockMenus = model('ir.ui.menu')
        # Mock the absence of custom menu
        model('res.users').read.return_value = []

    def tearDown(self):
        # Drop per-test mocks before the base class tears the request down.
        del self.MockMenus
        del self.menu
        super(LoadTest, self).tearDown()

    def test_empty(self):
        # No root menus at all -> load() must produce an empty tree.
        self.MockMenus.search.return_value = []
        self.MockMenus.read.return_value = []
        root = self.menu.load()

        self.MockMenus.search.assert_called_with(
            [('parent_id','=', False)], 0, False, False,
            req.context)
        self.assertEqual(root['all_menu_ids'], [])
        self.assertListEqual(
            root['children'],
            [])

    def test_applications_sort(self):
        # Three top-level menus returned out of sequence order; load()
        # must sort the applications by their 'sequence' field.
        self.MockMenus.search.return_value = [1, 2, 3]
        self.MockMenus.read.side_effect = lambda *args: [
            {'id': 1, 'sequence': 1, 'parent_id': False},
            {'id': 3, 'sequence': 2, 'parent_id': False},
            {'id': 2, 'sequence': 3, 'parent_id': False},
        ]

        root = self.menu.load()

        self.MockMenus.search.assert_called_with(
            [('id','child_of', [1, 2, 3])], 0, False, False,
            req.context)
        self.MockMenus.read.assert_called_with(
            [1, 2, 3], ['name', 'sequence', 'parent_id',
                        'action'],
            req.context)
        self.assertEqual(root['all_menu_ids'], [1, 2, 3])
        self.assertEqual(
            root['children'],
            [{
                'id': 1, 'sequence': 1,
                'parent_id': False, 'children': []
            }, {
                'id': 3, 'sequence': 2,
                'parent_id': False, 'children': []
            }, {
                'id': 2, 'sequence': 3,
                'parent_id': False, 'children': []
            }])

    def test_deep(self):
        # One root with a nested chain of descendants; load() must rebuild
        # the parent/child hierarchy from the flat read() result.
        self.MockMenus.search.side_effect = lambda domain, *args: (
            [1] if domain == [('parent_id', '=', False)] else [1, 2, 3, 4])
        root = {'id': 1, 'sequence': 1, 'parent_id': False}
        self.MockMenus.read.side_effect = lambda ids, *args: (
            [root] if ids == [1] else [
                {'id': 1, 'sequence': 1, 'parent_id': False},
                {'id': 2, 'sequence': 2, 'parent_id': [1, '']},
                {'id': 3, 'sequence': 1, 'parent_id': [2, '']},
                {'id': 4, 'sequence': 2, 'parent_id': [2, '']},
            ])

        root = self.menu.load()

        self.MockMenus.search.assert_called_with(
            [('id','child_of', [1])], 0, False, False,
            req.context)

        self.assertEqual(root['all_menu_ids'], [1, 2, 3, 4])
        self.assertEqual(
            root['children'],
            [{
                'id': 1,
                'sequence': 1,
                'parent_id': False,
                'children': [{
                    'id': 2,
                    'sequence': 2,
                    'parent_id': [1, ''],
                    'children': [{
                        'id': 3,
                        'sequence': 1,
                        'parent_id': [2, ''],
                        'children': []
                    }, {
                        'id': 4,
                        'sequence': 2,
                        'parent_id': [2, ''],
                        'children': []
                    }]
                }]
            }]
        )
class ActionMungerTest(unittest2.TestCase):
    """Tests for ``main.fix_view_modes()``: legacy 'tree' view modes are
    rewritten to 'list' (when view_type is 'form') and the obsolete
    'view_type' key is dropped from the action."""

    def setUp(self):
        self.menu = main.Menu()

    def test_actual_treeview(self):
        # view_type == 'tree' denotes a real tree view: only the
        # 'view_type' key is removed, the views themselves are untouched.
        action = {
            "views": [[False, "tree"], [False, "form"],
                      [False, "calendar"]],
            "view_type": "tree",
            "view_id": False,
            "view_mode": "tree,form,calendar"
        }
        changed = action.copy()
        del action['view_type']

        main.fix_view_modes(changed)

        self.assertEqual(changed, action)

    def test_list_view(self):
        # view_type == 'form' means a 'tree' entry actually stands for a
        # list view, so every 'tree' is rewritten to 'list'.
        action = {
            "views": [[False, "tree"], [False, "form"],
                      [False, "calendar"]],
            "view_type": "form",
            "view_id": False,
            "view_mode": "tree,form,calendar"
        }
        main.fix_view_modes(action)

        self.assertEqual(action, {
            "views": [[False, "list"], [False, "form"],
                      [False, "calendar"]],
            "view_id": False,
            "view_mode": "list,form,calendar"
        })

    def test_redundant_views(self):
        # Every 'tree' entry is rewritten, including ones with explicit
        # view ids (42 here), not only the default False id.
        action = {
            "views": [[False, "tree"], [False, "form"],
                      [False, "calendar"], [42, "tree"]],
            "view_type": "form",
            "view_id": False,
            "view_mode": "tree,form,calendar"
        }
        main.fix_view_modes(action)

        self.assertEqual(action, {
            "views": [[False, "list"], [False, "form"],
                      [False, "calendar"], [42, "list"]],
            "view_id": False,
            "view_mode": "list,form,calendar"
        })
|
agpl-3.0
|
ambikeshwar1991/gnuradio-3.7.4
|
gr-utils/python/modtool/templates.py
|
27
|
20572
|
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
''' All the templates for skeleton files (needed by ModToolAdd) '''
from datetime import datetime
# Registry of skeleton-file templates, keyed by template name; consumed by
# ModToolAdd when generating new module/block files.
Templates = {}
# Default licence
# NOTE: '%d' is interpolated with the current year once, at import time.
Templates['defaultlicense'] = '''
Copyright %d <+YOU OR YOUR COMPANY+>.
This is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street,
Boston, MA 02110-1301, USA.
''' % datetime.now().year
# Licence text used for blocks contributed to GNU Radio itself.
Templates['grlicense'] = '''
Copyright %d Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GNU Radio is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU Radio; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street,
Boston, MA 02110-1301, USA.
''' % datetime.now().year
# Header file of a sync/decimator/interpolator block
# Cheetah template: '$name'/'${expr}' are substitutions, '#if/#else if/#end if'
# are Cheetah directives, and '\#' emits a literal '#' (C preprocessor line).
Templates['block_impl_h'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H
\#define INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H
\#include <${include_dir_prefix}/${blockname}.h>
namespace gr {
namespace ${modname} {
class ${blockname}_impl : public ${blockname}
{
private:
// Nothing to declare in this block.
#if $blocktype == 'tagged_stream'
protected:
int calculate_output_stream_length(const gr_vector_int &ninput_items);
#end if
public:
${blockname}_impl(${strip_default_values($arglist)});
~${blockname}_impl();
// Where all the action really happens
#if $blocktype == 'general'
void forecast (int noutput_items, gr_vector_int &ninput_items_required);
int general_work(int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items);
#else if $blocktype == 'tagged_stream'
int work(int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items);
#else if $blocktype == 'hier'
#silent pass
#else
int work(int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items);
#end if
};
} // namespace ${modname}
} // namespace gr
\#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_IMPL_H */
'''
# C++ file of a GR block
# Cheetah template producing the *_impl.cc skeleton; the '#if $blocktype'
# directives select the right work()/general_work() variant per block type.
Templates['block_impl_cpp'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifdef HAVE_CONFIG_H
\#include "config.h"
\#endif
\#include <gnuradio/io_signature.h>
#if $blocktype == 'noblock'
\#include <${include_dir_prefix}/${blockname}.h>
#else
\#include "${blockname}_impl.h"
#end if
namespace gr {
namespace ${modname} {
#if $blocktype == 'noblock'
$blockname::${blockname}(${strip_default_values($arglist)})
{
}
$blockname::~${blockname}()
{
}
#else
${blockname}::sptr
${blockname}::make(${strip_default_values($arglist)})
{
return gnuradio::get_initial_sptr
(new ${blockname}_impl(${strip_arg_types($arglist)}));
}
#if $blocktype == 'decimator'
#set $decimation = ', <+decimation+>'
#else if $blocktype == 'interpolator'
#set $decimation = ', <+interpolation+>'
#else if $blocktype == 'tagged_stream'
#set $decimation = ', <+len_tag_key+>'
#else
#set $decimation = ''
#end if
#if $blocktype == 'source'
#set $inputsig = '0, 0, 0'
#else
#set $inputsig = '<+MIN_IN+>, <+MAX_IN+>, sizeof(<+ITYPE+>)'
#end if
#if $blocktype == 'sink'
#set $outputsig = '0, 0, 0'
#else
#set $outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, sizeof(<+OTYPE+>)'
#end if
/*
* The private constructor
*/
${blockname}_impl::${blockname}_impl(${strip_default_values($arglist)})
: gr::${grblocktype}("${blockname}",
gr::io_signature::make($inputsig),
gr::io_signature::make($outputsig)$decimation)
#if $blocktype == 'hier'
{
connect(self(), 0, d_firstblock, 0);
// connect other blocks
connect(d_lastblock, 0, self(), 0);
}
#else
{}
#end if
/*
* Our virtual destructor.
*/
${blockname}_impl::~${blockname}_impl()
{
}
#if $blocktype == 'general'
void
${blockname}_impl::forecast (int noutput_items, gr_vector_int &ninput_items_required)
{
/* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
}
int
${blockname}_impl::general_work (int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const <+ITYPE*> *in = (const <+ITYPE*> *) input_items[0];
<+OTYPE*> *out = (<+OTYPE*> *) output_items[0];
// Do <+signal processing+>
// Tell runtime system how many input items we consumed on
// each input stream.
consume_each (noutput_items);
// Tell runtime system how many output items we produced.
return noutput_items;
}
#else if $blocktype == 'tagged_stream'
int
${blockname}_impl::calculate_output_stream_length(const gr_vector_int &ninput_items)
{
int noutput_items = /* <+set this+> */;
return noutput_items ;
}
int
${blockname}_impl::work (int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
<+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
// Do <+signal processing+>
// Tell runtime system how many output items we produced.
return noutput_items;
}
#else if $blocktype == 'hier'
#silent pass
#else
int
${blockname}_impl::work(int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
#if $blocktype == 'source'
#silent pass
#else
const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
#end if
#if $blocktype == 'sink'
#silent pass
#else
<+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
#end if
// Do <+signal processing+>
// Tell runtime system how many output items we produced.
return noutput_items;
}
#end if
#end if
} /* namespace ${modname} */
} /* namespace gr */
'''
# Block definition header file (for include/)
Templates['block_def_h'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_H
\#define INCLUDED_${modname.upper()}_${blockname.upper()}_H
\#include <${include_dir_prefix}/api.h>
#if $blocktype != 'noblock'
\#include <gnuradio/${grblocktype}.h>
#end if
namespace gr {
namespace ${modname} {
#if $blocktype == 'noblock'
/*!
* \\brief <+description+>
*
*/
class ${modname.upper()}_API $blockname
{
public:
${blockname}(${arglist});
~${blockname}();
private:
};
#else
/*!
* \\brief <+description of block+>
* \ingroup ${modname}
*
*/
class ${modname.upper()}_API ${blockname} : virtual public gr::$grblocktype
{
public:
typedef boost::shared_ptr<${blockname}> sptr;
/*!
* \\brief Return a shared_ptr to a new instance of ${modname}::${blockname}.
*
* To avoid accidental use of raw pointers, ${modname}::${blockname}'s
* constructor is in a private implementation
* class. ${modname}::${blockname}::make is the public interface for
* creating new instances.
*/
static sptr make($arglist);
};
#end if
} // namespace ${modname}
} // namespace gr
\#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_H */
'''
# Python block
Templates['block_python'] = '''\#!/usr/bin/env python
# -*- coding: utf-8 -*-
${str_to_python_comment($license)}
#
#if $blocktype == 'noblock'
#stop
#end if
#if $blocktype in ('sync', 'sink', 'source')
#set $parenttype = 'gr.sync_block'
#else
#set $parenttype = {'hier': 'gr.hier_block2', 'interpolator': 'gr.interp_block', 'decimator': 'gr.decim_block', 'general': 'gr.basic_block'}[$blocktype]
#end if
#if $blocktype != 'hier'
import numpy
#if $blocktype == 'source'
#set $inputsig = 'None'
#else
#set $inputsig = '[<+numpy.float+>]'
#end if
#if $blocktype == 'sink'
#set $outputsig = 'None'
#else
#set $outputsig = '[<+numpy.float+>]'
#end if
#else
#if $blocktype == 'source'
#set $inputsig = '0, 0, 0'
#else
#set $inputsig = '<+MIN_IN+>, <+MAX_IN+>, gr.sizeof_<+ITYPE+>'
#end if
#if $blocktype == 'sink'
#set $outputsig = '0, 0, 0'
#else
#set $outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, gr.sizeof_<+OTYPE+>'
#end if
#end if
#if $blocktype == 'interpolator'
#set $deciminterp = ', <+interpolation+>'
#else if $blocktype == 'decimator'
#set $deciminterp = ', <+decimation+>'
#else
#set $deciminterp = ''
#end if
from gnuradio import gr
class ${blockname}(${parenttype}):
"""
docstring for block ${blockname}
"""
def __init__(self#if $arglist == '' then '' else ', '#$arglist):
${parenttype}.__init__(self,
#if $blocktype == 'hier'
"$blockname",
gr.io_signature(${inputsig}), # Input signature
gr.io_signature(${outputsig})) # Output signature
# Define blocks and connect them
self.connect()
#stop
#else
name="${blockname}",
in_sig=${inputsig},
out_sig=${outputsig}${deciminterp})
#end if
#if $blocktype == 'general'
def forecast(self, noutput_items, ninput_items_required):
#setup size of input_items[i] for work call
for i in range(len(ninput_items_required)):
ninput_items_required[i] = noutput_items
def general_work(self, input_items, output_items):
output_items[0][:] = input_items[0]
consume(0, len(input_items[0]))
\#self.consume_each(len(input_items[0]))
return len(output_items[0])
#stop
#end if
def work(self, input_items, output_items):
#if $blocktype != 'source'
in0 = input_items[0]
#end if
#if $blocktype != 'sink'
out = output_items[0]
#end if
# <+signal processing here+>
#if $blocktype in ('sync', 'decimator', 'interpolator')
out[:] = in0
return len(output_items[0])
#else if $blocktype == 'sink'
return len(input_items[0])
#else if $blocktype == 'source'
out[:] = whatever
return len(output_items[0])
#end if
'''
# C++ QA implementation (CppUnit test body). Cheetah template: \# escapes a
# literal '#' so it is not parsed as a Cheetah directive.
Templates['qa_cpp'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#include <gnuradio/attributes.h>
\#include <cppunit/TestAssert.h>
\#include "qa_${blockname}.h"
\#include <${include_dir_prefix}/${blockname}.h>
namespace gr {
namespace ${modname} {
void
qa_${blockname}::t1()
{
// Put test here
}
} /* namespace ${modname} */
} /* namespace gr */
'''
# C++ QA header declaring the CppUnit test suite for the block.
Templates['qa_h'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifndef _QA_${blockname.upper()}_H_
\#define _QA_${blockname.upper()}_H_
\#include <cppunit/extensions/HelperMacros.h>
\#include <cppunit/TestCase.h>
namespace gr {
namespace ${modname} {
class qa_${blockname} : public CppUnit::TestCase
{
public:
CPPUNIT_TEST_SUITE(qa_${blockname});
CPPUNIT_TEST(t1);
CPPUNIT_TEST_SUITE_END();
private:
void t1();
};
} /* namespace ${modname} */
} /* namespace gr */
\#endif /* _QA_${blockname.upper()}_H_ */
'''
# Python QA code
Templates['qa_python'] = '''\#!/usr/bin/env python
# -*- coding: utf-8 -*-
${str_to_python_comment($license)}
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
#if $lang == 'cpp'
import ${modname}_swig as ${modname}
#else
from ${blockname} import ${blockname}
#end if
class qa_$blockname (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_${blockname}, "qa_${blockname}.xml")
'''
Templates['grc_xml'] = '''<?xml version="1.0"?>
<block>
<name>$blockname</name>
<key>${modname}_$blockname</key>
<category>$modname</category>
<import>import $modname</import>
<make>${modname}.${blockname}(${strip_arg_types_grc($arglist)})</make>
<!-- Make one 'param' node for every Parameter you want settable from the GUI.
Sub-nodes:
* name
* key (makes the value accessible as \$keyname, e.g. in the make node)
* type -->
<param>
<name>...</name>
<key>...</key>
<type>...</type>
</param>
<!-- Make one 'sink' node per input. Sub-nodes:
* name (an identifier for the GUI)
* type
* vlen
* optional (set to 1 for optional inputs) -->
<sink>
<name>in</name>
<type><!-- e.g. int, float, complex, byte, short, xxx_vector, ...--></type>
</sink>
<!-- Make one 'source' node per output. Sub-nodes:
* name (an identifier for the GUI)
* type
* vlen
* optional (set to 1 for optional inputs) -->
<source>
<name>out</name>
<type><!-- e.g. int, float, complex, byte, short, xxx_vector, ...--></type>
</source>
</block>
'''
# Usage message printed by gr_modtool's command-line help.
Templates['usage'] = '''
gr_modtool <command> [options] -- Run <command> with the given options.
gr_modtool help -- Show a list of commands.
gr_modtool help <command> -- Shows the help for a given command. '''
# SWIG .i snippet (Cheetah template): %includes the block header and, for
# real blocks, emits GR_SWIG_BLOCK_MAGIC (3.6 tree) or GR_SWIG_BLOCK_MAGIC2.
Templates['swig_block_magic'] = """#if $version == '36'
#if $blocktype != 'noblock'
GR_SWIG_BLOCK_MAGIC($modname, $blockname);
#end if
%include "${modname}_${blockname}.h"
#else
%include "${include_dir_prefix}/${blockname}.h"
#if $blocktype != 'noblock'
GR_SWIG_BLOCK_MAGIC2($modname, $blockname);
#end if
#end if
"""
## Old stuff
# C++ file of a GR block
Templates['block_cpp36'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifdef HAVE_CONFIG_H
\#include "config.h"
\#endif
#if $blocktype != 'noblock'
\#include <gr_io_signature.h>
#end if
\#include "${modname}_${blockname}.h"
#if $blocktype == 'noblock'
${modname}_${blockname}::${modname}_${blockname}(${strip_default_values($arglist)})
{
}
${modname}_${blockname}::~${modname}_${blockname}()
{
}
#else
${modname}_${blockname}_sptr
${modname}_make_${blockname} (${strip_default_values($arglist)})
{
return gnuradio::get_initial_sptr (new ${modname}_${blockname}(${strip_arg_types($arglist)}));
}
#if $blocktype == 'decimator'
#set $decimation = ', <+decimation+>'
#else if $blocktype == 'interpolator'
#set $decimation = ', <+interpolation+>'
#else
#set $decimation = ''
#end if
#if $blocktype == 'sink'
#set $inputsig = '0, 0, 0'
#else
#set $inputsig = '<+MIN_IN+>, <+MAX_IN+>, sizeof(<+ITYPE+>)'
#end if
#if $blocktype == 'source'
#set $outputsig = '0, 0, 0'
#else
#set $outputsig = '<+MIN_OUT+>, <+MAX_OUT+>, sizeof(<+OTYPE+>)'
#end if
/*
* The private constructor
*/
${modname}_${blockname}::${modname}_${blockname} (${strip_default_values($arglist)})
: gr_${grblocktype} ("${blockname}",
gr_make_io_signature($inputsig),
gr_make_io_signature($outputsig)$decimation)
{
#if $blocktype == 'hier'
connect(self(), 0, d_firstblock, 0);
// <+connect other blocks+>
connect(d_lastblock, 0, self(), 0);
#else
// Put in <+constructor stuff+> here
#end if
}
/*
* Our virtual destructor.
*/
${modname}_${blockname}::~${modname}_${blockname}()
{
// Put in <+destructor stuff+> here
}
#end if
#if $blocktype == 'general'
void
${modname}_${blockname}::forecast (int noutput_items, gr_vector_int &ninput_items_required)
{
/* <+forecast+> e.g. ninput_items_required[0] = noutput_items */
}
int
${modname}_${blockname}::general_work (int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
<+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
// Do <+signal processing+>
// Tell runtime system how many input items we consumed on
// each input stream.
consume_each (noutput_items);
// Tell runtime system how many output items we produced.
return noutput_items;
}
#else if $blocktype == 'hier' or $blocktype == 'noblock'
#pass
#else
int
${modname}_${blockname}::work(int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items)
{
const <+ITYPE+> *in = (const <+ITYPE+> *) input_items[0];
<+OTYPE+> *out = (<+OTYPE+> *) output_items[0];
// Do <+signal processing+>
// Tell runtime system how many output items we produced.
return noutput_items;
}
#end if
'''
# Block definition header file (for include/)
Templates['block_h36'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#ifndef INCLUDED_${modname.upper()}_${blockname.upper()}_H
\#define INCLUDED_${modname.upper()}_${blockname.upper()}_H
\#include <${modname}_api.h>
#if $blocktype == 'noblock'
class ${modname.upper()}_API $blockname
{
${blockname}(${arglist});
~${blockname}();
private:
};
#else
\#include <gr_${grblocktype}.h>
class ${modname}_${blockname};
typedef boost::shared_ptr<${modname}_${blockname}> ${modname}_${blockname}_sptr;
${modname.upper()}_API ${modname}_${blockname}_sptr ${modname}_make_${blockname} ($arglist);
/*!
* \\brief <+description+>
* \ingroup ${modname}
*
*/
class ${modname.upper()}_API ${modname}_${blockname} : public gr_$grblocktype
{
private:
friend ${modname.upper()}_API ${modname}_${blockname}_sptr ${modname}_make_${blockname} (${strip_default_values($arglist)});
${modname}_${blockname}(${strip_default_values($arglist)});
public:
~${modname}_${blockname}();
#if $blocktype == 'general'
void forecast (int noutput_items, gr_vector_int &ninput_items_required);
// Where all the action really happens
int general_work (int noutput_items,
gr_vector_int &ninput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items);
#else if $blocktype == 'hier'
#pass
#else
// Where all the action really happens
int work (int noutput_items,
gr_vector_const_void_star &input_items,
gr_vector_void_star &output_items);
#end if
};
#end if
\#endif /* INCLUDED_${modname.upper()}_${blockname.upper()}_H */
'''
# C++ QA file for the legacy 3.6 tree: Boost.Test unit-test skeleton.
Templates['qa_cpp36'] = '''/* -*- c++ -*- */
${str_to_fancyc_comment($license)}
\#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_CASE(qa_${modname}_${blockname}_t1){
BOOST_CHECK_EQUAL(2 + 2, 4);
// TODO BOOST_* test macros here
}
BOOST_AUTO_TEST_CASE(qa_${modname}_${blockname}_t2){
BOOST_CHECK_EQUAL(2 + 2, 4);
// TODO BOOST_* test macros here
}
'''
# CMake entry registering a QA executable (3.6 tree). NOTE: the old comment
# said "Header file for QA", but this template is CMake code, not a header.
Templates['qa_cmakeentry36'] = """
add_executable($basename $filename)
target_link_libraries($basename gnuradio-$modname \${Boost_LIBRARIES})
GR_ADD_TEST($basename $basename)
"""
|
gpl-3.0
|
egabancho/lumberjack
|
lumberjack/schemas.py
|
3
|
4741
|
# -*- coding: utf-8 -*-
#
# This file is part of Lumberjack.
# Copyright 2014 CERN.
#
# Lumberjack is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Lumberjack is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lumberjack. If not, see <http://www.gnu.org/licenses/>.
"""Provides SchemaManager class."""
from __future__ import absolute_import
import logging
from elasticsearch import NotFoundError, TransportError
from copy import deepcopy
class SchemaManager(object):
    """Manage the 'schemas' for different types of log data.

    A detailed description of schemas is given in the documentation for
    ``lumberjack.Lumberjack.register_schema``.

    This class manages a list of schemas registered and ensures that they are
    processed and passed into Elasticsearch as appropriate.

    :param elasticsearch: The ``elasticsearch.Elasticsearch`` object to
        register mappings and templates with.

    :param config: The Lumberjack config.  See the Configuration section in
        the docs for details.
    """

    def __init__(self, elasticsearch, config):
        """Init method. See class docstring."""
        self.elasticsearch = elasticsearch
        self.config = config

    def register_schema(self, logger, schema):
        """Take a new schema and add it to the roster.

        This also automatically parses the schema into a mapping and adds it
        into the appropriate index template in Elasticsearch.

        :param logger: The name of the logger which the log data will be
            emitted on.

        :param schema: The schema data to be processed into a mapping.
        """
        log = logging.getLogger(__name__)
        mapping = self._build_mapping(schema)
        template = {
            'template': self.config['index_prefix'] + '*',
            'mappings': {
                logger: mapping
            }
        }
        log.debug('Registering a new template for %s.', logger)
        try:
            self.elasticsearch.indices.put_template(
                name='lumberjack-' + self.config['index_prefix'] + logger,
                body=template
            )
        except TransportError:
            # Best-effort: log and fall through so we still try the
            # per-index mapping update below.
            log.warning(
                'Error putting new template in Elasticsearch: %s.',
                logger,
                exc_info=True)

        # Try to update existing indices that already match the prefix.
        try:
            self.elasticsearch.indices.put_mapping(
                index=self.config['index_prefix'] + '*',
                doc_type=logger,
                body=mapping
            )
        except NotFoundError:
            # No matching indices exist yet; nothing to update.
            pass
        except TransportError:
            log.warning(
                'There was an error putting the new mapping on some ' +
                'indices. If you try to log new data to these, you ' +
                'will see errors.',
                exc_info=True)

    def _build_mapping(self, schema):
        """Parse the schema into an Elasticsearch mapping.

        Returns a fresh dict sharing no structure with either the caller's
        ``schema`` or the configured defaults.
        """
        # Shorthand
        default_type_props = self.config['default_type_properties']

        this_mapping = deepcopy(self.config['default_mapping'])
        working_schema = deepcopy(schema)

        # Combine the unprocessed properties into this_mapping.
        if 'properties' in working_schema:
            # BUGFIX: update from the deep-copied working_schema, not from
            # ``schema`` itself -- otherwise nested property dicts in the
            # returned mapping alias the caller's schema and later caller
            # mutations would corrupt the mapping.
            this_mapping['properties'].update(working_schema['properties'])
            # So we don't overwrite this_mapping['properties'] later.
            del working_schema['properties']

        # Expand the fields in this_mapping['properties'] based on type.
        expanded_properties = {}
        for (field_name, field_info) in this_mapping['properties'].items():
            expanded_properties[field_name] = {}
            if ('type' in field_info and
                    field_info['type'] in default_type_props):
                # Seed with the configured defaults for this field type,
                # then let the field's own settings override them.
                expanded_properties[field_name].update(
                    default_type_props[field_info['type']])
            expanded_properties[field_name].update(field_info)

        # Put the expanded properties into the mapping for this type.
        this_mapping['properties'] = expanded_properties

        # Overwrite the defaults where applicable.
        this_mapping.update(working_schema)

        return this_mapping
|
gpl-3.0
|
mconlon17/vivo-pump
|
uf_examples/courses/merge_filter.py
|
3
|
1328
|
#!/usr/bin/env/python
"""
merge_filter.py -- find the courses in VIVO, and match them to the courses in the source. They
must match on ccn
There are two inputs:
1. Courses in VIVO. Keyed by ccn
2. UF courses in the source. Keyed the same.
There are three cases
1. Course in VIVO and in Source => Update VIVO from source
1. Course in VIVO, not in source => nothing to do
1. Course not in VIVO, is in source => Add to VIVO
See CHANGELOG.md for history
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.02"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, get_vivo_ccn, get_parms
# Runtime parameters (VIVO endpoint etc.) for the pump helpers.
parms = get_parms()
# Source course rows read as CSV from stdin; a dict keyed by row number.
data_in = read_csv_fp(sys.stdin)
print >>sys.stderr, len(data_in)
data_out = {}
vivo_courses = get_vivo_ccn(parms) # get dictionary of course uri keyed by ccn
print >>sys.stderr, 'VIVO courses', len(vivo_courses)
# Join source rows to VIVO on ccn: matched rows carry the VIVO uri (update
# case); unmatched rows get an empty uri (add case). Courses present only in
# VIVO are simply not emitted -- nothing to do for them.
for row, data in data_in.items():
    new_data = dict(data)
    if data['ccn'] in vivo_courses: # ccn is in vivo and source
        new_data['uri'] = vivo_courses[data['ccn']]
    else: # key is in source, not in vivo
        new_data['uri'] = ''
    data_out[row] = new_data
print >>sys.stderr, 'data out', len(data_out)
write_csv_fp(sys.stdout, data_out)
|
bsd-2-clause
|
titasakgm/brc-stock
|
openerp/addons/l10n_ch/account_wizard.py
|
424
|
2192
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
    """Extend the multi-chart setup wizard for the Swiss (l10n_ch) chart."""

    _inherit = 'wizard.multi.charts.accounts'

    def onchange_chart_template_id(self, cursor, uid, ids,
                                   chart_template_id=False, context=None):
        """Force ``code_digits`` to 0 when the Sterchi template is selected."""
        if context is None:
            context = {}
        result = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(
            cursor, uid, ids,
            chart_template_id=chart_template_id,
            context=context)
        # 0 is evaluated as False in python so we have to set it explicitly,
        # because the original wizard tests code_digits on a float widget.
        if chart_template_id:
            model_data = self.pool.get('ir.model.data')
            sterchi_template = model_data.get_object(
                cursor, uid, 'l10n_ch', 'l10nch_chart_template')
            if sterchi_template.id == chart_template_id:
                result['value']['code_digits'] = 0
        return result
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
shipci/sympy
|
sympy/core/tests/test_expr.py
|
4
|
55752
|
from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda, expand, diff, O, Heaviside)
from sympy.core.function import AppliedUndef
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.core.compatibility import xrange
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
    """
    Minimal implementation of a number that works with SymPy.

    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, resp. its subclasses I5 and F1_1.

    Basically, one just needs to implement either __int__() or __float__() and
    then one needs to make sure that the class works with Python integers and
    with itself.
    """

    # Forward operators accept ints, floats and other DummyNumbers; the
    # reflected variants accept only plain Python numbers. Anything else
    # yields NotImplemented so Python can try the other operand.

    def __add__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number + other
        return NotImplemented

    def __radd__(self, other):
        if isinstance(other, (int, float)):
            return other + self.number
        return NotImplemented

    def __sub__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number - other
        return NotImplemented

    def __rsub__(self, other):
        if isinstance(other, (int, float)):
            return other - self.number
        return NotImplemented

    def __mul__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number * other
        return NotImplemented

    def __rmul__(self, other):
        if isinstance(other, (int, float)):
            return other * self.number
        return NotImplemented

    def __div__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number / other
        return NotImplemented

    def __rdiv__(self, other):
        if isinstance(other, (int, float)):
            return other / self.number
        return NotImplemented

    # True division simply delegates to the classic-division names above.
    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __pow__(self, other):
        if isinstance(other, (int, float, DummyNumber)):
            return self.number ** other
        return NotImplemented

    def __rpow__(self, other):
        if isinstance(other, (int, float)):
            return other ** self.number
        return NotImplemented

    def __pos__(self):
        return self.number

    def __neg__(self):
        return -self.number
class I5(DummyNumber):
    """Integer-like dummy number: implements only ``__int__``."""
    number = 5
    def __int__(self):
        return self.number
class F1_1(DummyNumber):
    """Float-like dummy number: implements only ``__float__``."""
    number = 1.1
    def __float__(self):
        return self.number
i5 = I5()      # integer-like dummy instance
f1_1 = F1_1()  # float-like dummy instance
# basic sympy objects
basic_objs = [
    Rational(2),
    Float("1.3"),
    x,
    y,
    pow(x, y)*y,
]
# all supported objects: the sympy objects plus plain Python numbers and
# the dummy-number instances above
all_objs = basic_objs + [
    5,
    5.5,
    i5,
    f1_1
]
def dotest(s):
    """Call the binary function *s* on every ordered pair drawn from all_objs."""
    for left in all_objs:
        for right in all_objs:
            s(left, right)
    return True
def test_basic():
    """Smoke-test unary and binary arithmetic over all object pairs."""
    def j(a, b):
        # Results are discarded -- we only check no exception is raised.
        x = a
        x = +a
        x = -a
        x = a + b
        x = a - b
        x = a*b
        x = a/b
        x = a**b
    assert dotest(j)
def test_ibasic():
    """Smoke-test in-place arithmetic operators over all object pairs."""
    def s(a, b):
        x = a
        x += b
        x = a
        x -= b
        x = a
        x *= b
        x = a
        x /= b
    assert dotest(s)
def test_relational():
    """Comparisons of numeric/real expressions evaluate to S.true/S.false."""
    from sympy import Lt
    assert (pi < 3) is S.false
    assert (pi <= 3) is S.false
    assert (pi > 3) is S.true
    assert (pi >= 3) is S.true
    assert (-pi < 3) is S.true
    assert (-pi <= 3) is S.true
    assert (-pi > 3) is S.false
    assert (-pi >= 3) is S.false
    r = Symbol('r', real=True)
    assert (r - 2 < r - 3) is S.false
    assert Lt(x + I, x + I + 2).func == Lt  # issue 8288
def test_relational_assumptions():
    """Assumption=False alone leaves relations unevaluated; real=True decides them."""
    from sympy import Lt, Gt, Le, Ge
    # Only a negated assumption: the relation stays symbolic.
    m1 = Symbol("m1", nonnegative=False)
    m2 = Symbol("m2", positive=False)
    m3 = Symbol("m3", nonpositive=False)
    m4 = Symbol("m4", negative=False)
    assert (m1 < 0) == Lt(m1, 0)
    assert (m2 <= 0) == Le(m2, 0)
    assert (m3 > 0) == Gt(m3, 0)
    assert (m4 >= 0) == Ge(m4, 0)
    # Adding real=True makes the negated assumption decisive.
    m1 = Symbol("m1", nonnegative=False, real=True)
    m2 = Symbol("m2", positive=False, real=True)
    m3 = Symbol("m3", nonpositive=False, real=True)
    m4 = Symbol("m4", negative=False, real=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # Positive assumptions evaluate directly.
    m1 = Symbol("m1", negative=True)
    m2 = Symbol("m2", nonpositive=True)
    m3 = Symbol("m3", positive=True)
    m4 = Symbol("m4", nonnegative=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    m1 = Symbol("m1", negative=False, real=True)
    m2 = Symbol("m2", nonpositive=False, real=True)
    m3 = Symbol("m3", positive=False, real=True)
    m4 = Symbol("m4", nonnegative=False, real=True)
    assert (m1 < 0) is S.false
    assert (m2 <= 0) is S.false
    assert (m3 > 0) is S.false
    assert (m4 >= 0) is S.false
def test_relational_noncommutative():
    """Comparisons of noncommutative symbols build unevaluated relationals."""
    from sympy import Lt, Gt, Le, Ge
    A, B = symbols('A,B', commutative=False)
    assert (A < B) == Lt(A, B)
    assert (A <= B) == Le(A, B)
    assert (A > B) == Gt(A, B)
    assert (A >= B) == Ge(A, B)
def test_basic_nostr():
    """Mixing sympy objects with strings raises TypeError.

    Exception: ``Integer(2) * '1'`` falls back to Python string repetition.
    """
    for obj in basic_objs:
        raises(TypeError, lambda: obj + '1')
        raises(TypeError, lambda: obj - '1')
        if obj == 2:
            assert obj * '1' == '11'
        else:
            raises(TypeError, lambda: obj * '1')
        raises(TypeError, lambda: obj / '1')
        raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
    """series(x, 0, n) truncates at uniform order n while keeping the 1/x pole."""
    assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
    assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
    assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
    """leadterm(x) returns (coefficient, exponent) of the lowest power of x."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
    assert (1/x**2 + 1 + x + x**2).leadterm(x)[1] == -2
    assert (1/x + 1 + x + x**2).leadterm(x)[1] == -1
    assert (x**2 + 1/x).leadterm(x)[1] == -1
    assert (1 + x**2).leadterm(x)[1] == 0
    assert (x + 1).leadterm(x)[1] == 0
    assert (x + x**2).leadterm(x)[1] == 1
    assert (x**2).leadterm(x)[1] == 2
def test_as_leading_term():
    """as_leading_term(x) returns the dominant term as x -> 0."""
    assert (3 + 2*x**(log(3)/log(2) - 1)).as_leading_term(x) == 3
    assert (1/x**2 + 1 + x + x**2).as_leading_term(x) == 1/x**2
    assert (1/x + 1 + x + x**2).as_leading_term(x) == 1/x
    assert (x**2 + 1/x).as_leading_term(x) == 1/x
    assert (1 + x**2).as_leading_term(x) == 1
    assert (x + 1).as_leading_term(x) == 1
    assert (x + x**2).as_leading_term(x) == x
    assert (x**2).as_leading_term(x) == x**2
    assert (x + oo).as_leading_term(x) == oo
def test_leadterm2():
    """leadterm handles transcendental coefficients."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
        (sin(1 + sin(1)), 0)
def test_leadterm3():
    """Symbols other than x are treated as constants by leadterm."""
    assert (y + z + x).leadterm(x) == (y + z, 0)
def test_as_leading_term2():
    """as_leading_term handles transcendental coefficients."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
        sin(1 + sin(1))
def test_as_leading_term3():
    """Constant terms are collected into a single leading term."""
    assert (2 + pi + x).as_leading_term(x) == 2 + pi
    assert (2*x + pi*x + x**2).as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
    """Regression for a large rational expression in x (see issue 6843)."""
    # see issue 6843
    n = Symbol('n', integer=True, positive=True)
    r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
        n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
        1 + 1/(n*x + x) + 1/(n + 1) - 1/x
    assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
    """as_leading_term on a bare Function subclass with no evaluation rules."""
    class foo(Function):
        pass
    assert foo(1/x).as_leading_term(x) == foo(1/x)
    assert foo(1).as_leading_term(x) == foo(1)
    raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_atoms():
    """atoms() collects subexpressions, optionally filtered by type."""
    assert x.atoms() == set([x])
    assert (1 + x).atoms() == set([x, S(1)])
    assert (1 + 2*cos(x)).atoms(Symbol) == set([x])
    assert (1 + 2*cos(x)).atoms(Symbol, Number) == set([S(1), S(2), x])
    assert (2*(x**(y**x))).atoms() == set([S(2), x, y])
    assert Rational(1, 2).atoms() == set([S.Half])
    assert Rational(1, 2).atoms(Symbol) == set([])
    assert sin(oo).atoms(oo) == set([oo])
    assert Poly(0, x).atoms() == set([S.Zero])
    assert Poly(1, x).atoms() == set([S.One])
    assert Poly(x, x).atoms() == set([x])
    assert Poly(x, x, y).atoms() == set([x])
    assert Poly(x + y, x, y).atoms() == set([x, y])
    assert Poly(x + y, x, y, z).atoms() == set([x, y])
    assert Poly(x + y*t, x, y, z).atoms() == set([t, x, y])
    assert (I*pi).atoms(NumberSymbol) == set([pi])
    assert (I*pi).atoms(NumberSymbol, I) == \
        (I*pi).atoms(I, NumberSymbol) == set([pi, I])
    assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)])
    assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
        set([1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z])
    # issue 6132
    f = Function('f')
    e = (f(x) + sin(x) + 2)
    assert e.atoms(AppliedUndef) == \
        set([f(x)])
    assert e.atoms(AppliedUndef, Function) == \
        set([f(x), sin(x)])
    assert e.atoms(Function) == \
        set([f(x), sin(x)])
    assert e.atoms(AppliedUndef, Number) == \
        set([f(x), S(2)])
    assert e.atoms(Function, Number) == \
        set([S(2), sin(x), f(x)])
def test_is_polynomial():
    """is_polynomial(*syms): polynomial test with respect to the given symbols."""
    k = Symbol('k', nonnegative=True, integer=True)
    assert Rational(2).is_polynomial(x, y, z) is True
    assert (S.Pi).is_polynomial(x, y, z) is True
    assert x.is_polynomial(x) is True
    assert x.is_polynomial(y) is True
    assert (x**2).is_polynomial(x) is True
    assert (x**2).is_polynomial(y) is True
    assert (x**(-2)).is_polynomial(x) is False
    assert (x**(-2)).is_polynomial(y) is True
    assert (2**x).is_polynomial(x) is False
    assert (2**x).is_polynomial(y) is True
    # Symbolic exponents are not polynomial, even if nonnegative-integer.
    assert (x**k).is_polynomial(x) is False
    assert (x**k).is_polynomial(k) is False
    assert (x**x).is_polynomial(x) is False
    assert (k**k).is_polynomial(k) is False
    assert (k**x).is_polynomial(k) is False
    assert (x**(-k)).is_polynomial(x) is False
    assert ((2*x)**k).is_polynomial(x) is False
    assert (x**2 + 3*x - 8).is_polynomial(x) is True
    assert (x**2 + 3*x - 8).is_polynomial(y) is True
    assert (x**2 + 3*x - 8).is_polynomial() is True
    assert sqrt(x).is_polynomial(x) is False
    assert (sqrt(x)**3).is_polynomial(x) is False
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
def test_is_rational_function():
    """is_rational_function(*syms): ratio-of-polynomials test in the given symbols."""
    assert Integer(1).is_rational_function() is True
    assert Integer(1).is_rational_function(x) is True
    assert Rational(17, 54).is_rational_function() is True
    assert Rational(17, 54).is_rational_function(x) is True
    assert (12/x).is_rational_function() is True
    assert (12/x).is_rational_function(x) is True
    assert (x/y).is_rational_function() is True
    assert (x/y).is_rational_function(x) is True
    assert (x/y).is_rational_function(x, y) is True
    assert (x**2 + 1/x/y).is_rational_function() is True
    assert (x**2 + 1/x/y).is_rational_function(x) is True
    assert (x**2 + 1/x/y).is_rational_function(x, y) is True
    assert (sin(y)/x).is_rational_function() is False
    assert (sin(y)/x).is_rational_function(y) is False
    assert (sin(y)/x).is_rational_function(x) is True
    assert (sin(y)/x).is_rational_function(x, y) is False
def test_is_algebraic_expr():
    """is_algebraic_expr(*syms): allows rational powers, rejects transcendentals."""
    assert sqrt(3).is_algebraic_expr(x) is True
    assert sqrt(3).is_algebraic_expr() is True
    eq = ((1 + x**2)/(1 - y**2))**(S(1)/3)
    assert eq.is_algebraic_expr(x) is True
    assert eq.is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
    """Objects exposing _sympy_() are coerced in arithmetic with sympy numbers."""
    #see https://github.com/sympy/sympy/issues/3346
    class MyInt:
        def _sympy_(self):
            return Integer(5)
    m = MyInt()
    e = Rational(2)*m
    assert e == 10
    # The class object itself (not an instance) must not be coerced.
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
    """Objects exposing __int__ are sympified and usable in arithmetic."""
    class MyInt(object):
        def __int__(self):
            return 5
    assert sympify(MyInt()) == 5
    e = Rational(2)*MyInt()
    assert e == 10
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
    """x*o defers to a foreign object's __rmul__ when sympification fails."""
    class MySymbol:
        def __rmul__(self, other):
            return ('mys', other, self)
    o = MySymbol()
    e = x*o
    assert e == ('mys', x, o)
def test_len():
    """len() of .args for Mul and Add expressions."""
    e = x*y
    assert len(e.args) == 2
    e = x + y + z
    assert len(e.args) == 3
def test_doit():
    """doit() evaluates Integrals unless disabled via integrals=False."""
    a = Integral(x**2, x)
    assert isinstance(a.doit(), Integral) is False
    assert isinstance(a.doit(integrals=True), Integral) is False
    assert isinstance(a.doit(integrals=False), Integral) is True
    assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
    """Symbols do not grow function-style methods like x.cos()."""
    raises(AttributeError, lambda: x.cos())
    raises(AttributeError, lambda: x.sin())
    raises(AttributeError, lambda: x.exp())
def test_args():
    """Spot-check .args contents; Mul/Add arg order is not guaranteed,
    hence the membership tests against both orderings."""
    assert (x*y).args in ((x, y), (y, x))
    assert (x + y).args in ((x, y), (y, x))
    assert (x*y + 1).args in ((x*y, 1), (1, x*y))
    assert sin(x*y).args == (x*y,)
    assert sin(x*y).args[0] == x*y
    assert (x**y).args == (x, y)
    assert (x**y).args[0] == x
    assert (x**y).args[1] == y
def test_noncommutative_expand_issue_3757():
    """expand() must preserve operand order for noncommutative symbols."""
    A, B, C = symbols('A,B,C', commutative=False)
    assert A*B - B*A != 0
    assert (A*(A + B)*B).expand() == A**2*B + A*B**2
    assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
    """Check Expr.as_numer_denom on special values, rationals, sums over
    common denominators, infinities, and noncommutative products (whose
    inverse factors stay in the numerator)."""
    a, b, c = symbols('a, b, c')
    assert nan.as_numer_denom() == (nan, 1)
    assert oo.as_numer_denom() == (oo, 1)
    assert (-oo).as_numer_denom() == (-oo, 1)
    assert zoo.as_numer_denom() == (zoo, 1)
    assert (-zoo).as_numer_denom() == (zoo, 1)
    assert x.as_numer_denom() == (x, 1)
    assert (1/x).as_numer_denom() == (1, x)
    assert (x/y).as_numer_denom() == (x, y)
    assert (x/2).as_numer_denom() == (x, 2)
    assert (x*y/z).as_numer_denom() == (x*y, z)
    assert (x/(y*z)).as_numer_denom() == (x, y*z)
    assert Rational(1, 2).as_numer_denom() == (1, 2)
    assert (1/y**2).as_numer_denom() == (1, y**2)
    assert (x/y**2).as_numer_denom() == (x, y**2)
    assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
    assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
    assert (x**-2).as_numer_denom() == (1, x**2)
    assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
        (6*a + 3*b + 2*c, 6*x)
    assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
        (2*c*x + y*(6*a + 3*b), 6*x*y)
    assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
        (2*a + b + 4.0*c, 2*x)
    # this should take no more than a few seconds
    assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)]
                       ).as_numer_denom()[1]/x).n(4)) == 705
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).as_numer_denom() == \
            (x + i, 3)
    assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
        (4*x + 3*y + S.Infinity, 12)
    assert (oo*x + zoo*y).as_numer_denom() == \
        (zoo*y + oo*x, 1)
    A, B, C = symbols('A,B,C', commutative=False)
    assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
    assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
    assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
    assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
    assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
    assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_as_independent():
    """Check Expr.as_independent: splitting (independent, dependent) parts
    for Add/Mul, noncommutative factors, as_Add toggling, and strict mode."""
    assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
    assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
    assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
    assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
    assert (sin(x)).as_independent(x) == (1, sin(x))
    assert (sin(x)).as_independent(y) == (sin(x), 1)
    assert (2*sin(x)).as_independent(x) == (2, sin(x))
    assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
    # issue 4903 = 1766b
    n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
    assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
    assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
    assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
    assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
    assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
    assert (3*x).as_independent(x, as_Add=False) == (3, x)
    assert (3 + x).as_independent(x, as_Add=True) == (3, x)
    assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
    # issue 5479
    assert (3*x).as_independent(Symbol) == (3, x)
    # issue 5648
    assert (n1*x*y).as_independent(x) == (n1*y, x)
    assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
    assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
    assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
        == (1, DiracDelta(x - n1)*DiracDelta(x - y))
    assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
    assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
        (DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
    # issue 5784
    assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
        (Integral(x, (x, 1, 2)), x)
def test_replace():
    """Check Expr.replace with type->type, pattern->expr, pattern->func and
    predicate->func rules, plus the exact, map and simultaneous flags."""
    f = log(sin(x)) + tan(sin(x**2))
    assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    a = Wild('a')
    b = Wild('b')
    assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    # test exact
    assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, b - a) == 2/x
    assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2/x
    g = 2*sin(x**3)
    assert g.replace(
        lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
    assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
    assert sin(x).replace(cos, sin) == sin(x)
    cond, func = lambda x: x.is_Mul, lambda x: 2*x
    assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
    assert (x*(1 + x*y)).replace(cond, func, map=True) == \
        (2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
        (sin(x), {sin(x): sin(x)/y})
    # if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
        simultaneous=False) == sin(x)/y
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
        simultaneous=False) == x**2/2 + O(x**3)
    assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
        x*(x*y + 5) + 2
    e = (x*y + 1)*(2*x*y + 1) + 1
    assert e.replace(cond, func, map=True) == (
        2*((2*x*y + 1)*(4*x*y + 1)) + 1,
        {2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
        2*((2*x*y + 1)*(4*x*y + 1))})
    assert x.replace(x, y) == y
    assert (x + 1).replace(1, 2) == x + 2
    # https://groups.google.com/forum/#!topic/sympy/8wCgeC95tz0
    n1, n2, n3 = symbols('n1:4', commutative=False)
    f = Function('f')
    assert (n1*f(n2)).replace(f, lambda x: x) == n1*n2
    assert (n3*f(n2)).replace(f, lambda x: x) == n3*n2
def test_find():
    """Check Expr.find with predicates, classes and Wild patterns; with
    group=True a {match: count} dict is returned instead of a set."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
    assert expr.find(lambda u: u.is_Symbol) == set([x, y])
    assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
    assert expr.find(Integer) == set([S(2), S(3)])
    assert expr.find(Symbol) == set([x, y])
    assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(Symbol, group=True) == {x: 2, y: 1}
    a = Wild('a')
    expr = sin(sin(x)) + sin(x) + cos(x) + x
    assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
    assert expr.find(
        lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
    assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin) == set([sin(x), sin(sin(x))])
    assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
    """Check Expr.count with predicates, classes, literals and patterns."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.count(lambda u: u.is_Integer) == 2
    assert expr.count(lambda u: u.is_Symbol) == 3
    assert expr.count(Integer) == 2
    assert expr.count(Symbol) == 3
    assert expr.count(2) == 1
    a = Wild('a')
    assert expr.count(sin) == 1
    assert expr.count(sin(a)) == 1
    assert expr.count(lambda u: type(u) is sin) == 1
def test_has_basics():
    """Basic Expr.has behavior: symbols, function heads, Derivative,
    class queries (Symbol/Wild), and the no-argument case."""
    f = Function('f')
    g = Function('g')
    p = Wild('p')
    assert sin(x).has(x)
    assert sin(x).has(sin)
    assert not sin(x).has(y)
    assert not sin(x).has(cos)
    assert f(x).has(x)
    assert f(x).has(f)
    assert not f(x).has(y)
    assert not f(x).has(g)
    assert f(x).diff(x).has(x)
    assert f(x).diff(x).has(f)
    assert f(x).diff(x).has(Derivative)
    assert not f(x).diff(x).has(y)
    assert not f(x).diff(x).has(g)
    assert not f(x).diff(x).has(sin)
    assert (x**2).has(Symbol)
    assert not (x**2).has(Wild)
    assert (2*p).has(Wild)
    # has() with no arguments is vacuously False
    assert not x.has()
def test_has_multiple():
    """has(*targets) is True if ANY of the targets is present."""
    f = x**2*y + sin(2**t + log(z))
    assert f.has(x)
    assert f.has(y)
    assert f.has(z)
    assert f.has(t)
    assert not f.has(u)
    assert f.has(x, y, z, t)
    assert f.has(x, y, z, t, u)
    i = Integer(4400)
    assert not i.has(x)
    assert (i*x**i).has(x)
    assert not (i*y**i).has(x)
    assert (i*y**i).has(x, y)
    assert not (i*y**i).has(x, z)
def test_has_piecewise():
    """has() descends into Piecewise branches and their conditions."""
    f = (x*y + 3/y)**(3 + 2)
    g = Function('g')
    h = Function('h')
    p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
    assert p.has(x)
    assert p.has(y)
    assert not p.has(z)
    assert p.has(1)
    assert p.has(3)
    assert not p.has(4)
    assert p.has(f)
    assert p.has(g)
    assert not p.has(h)
def test_has_iterative():
    """has() matches sub-products; for noncommutative factors the order
    of the queried product must match the expression's order."""
    A, B, C = symbols('A,B,C', commutative=False)
    f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
    assert f.has(x)
    assert f.has(x*y)
    assert f.has(x*sin(x))
    assert not f.has(x*sin(y))
    assert f.has(x*A)
    assert f.has(x*A*B)
    assert not f.has(x*A*C)
    assert f.has(x*A*B*C)
    assert not f.has(x*A*C*B)
    assert f.has(x*sin(x)*A*B*C)
    assert not f.has(x*sin(x)*A*C*B)
    assert not f.has(x*sin(y)*A*B*C)
    assert f.has(x*gamma(x))
    assert not f.has(x + sin(x))
    assert (x & y & z).has(x & z)
def test_has_integrals():
    """has() looks inside Integral integrands and limits."""
    f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
    assert f.has(x + y)
    assert f.has(x + z)
    assert f.has(y + z)
    assert f.has(x*y)
    assert f.has(x*z)
    assert f.has(y*z)
    assert not f.has(2*x + y)
    assert not f.has(2*x*y)
def test_has_tuple():
    """has() descends into Tuple elements; Tuple of bare function heads
    only matches the heads themselves."""
    f = Function('f')
    g = Function('g')
    h = Function('h')
    assert Tuple(x, y).has(x)
    assert not Tuple(x, y).has(z)
    assert Tuple(f(x), g(x)).has(x)
    assert not Tuple(f(x), g(x)).has(y)
    assert Tuple(f(x), g(x)).has(f)
    assert Tuple(f(x), g(x)).has(f(x))
    assert not Tuple(f, g).has(x)
    assert Tuple(f, g).has(f)
    assert not Tuple(f, g).has(h)
    assert Tuple(True).has(True) is True  # .has(1) will also be True
def test_has_units():
    """has() works on expressions carrying physical units."""
    from sympy.physics.units import m, s
    assert (x*m/s).has(x)
    assert (x*m/s).has(y, z) is False
def test_has_polys():
    """has() works on Poly objects, including their coefficient content."""
    poly = Poly(x**2 + x*y*sin(z), x, y, t)
    assert poly.has(x)
    assert poly.has(x, y, z)
    assert poly.has(x, y, z, t)
def test_has_physics():
    """has() works on physics objects such as FockState."""
    assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
    """as_poly/as_expr round-trip; non-polynomial input gives None."""
    f = x**2 + 2*x*y
    assert f.as_poly().as_expr() == f
    assert f.as_poly(x, y).as_expr() == f
    assert (f + sin(x)).as_poly(x, y) is None
    p = Poly(f, x, y)
    assert p.as_poly() == p
def test_nonzero():
    """bool() of an expression: only expressions that evaluate to zero
    are falsy; symbolic (possibly nonzero) expressions are truthy."""
    assert bool(S.Zero) is False
    assert bool(S.One) is True
    assert bool(x) is True
    assert bool(x + y) is True
    assert bool(x - x) is False
    assert bool(x*y) is True
    assert bool(x*1) is True
    assert bool(x*0) is False
def test_is_number():
    """is_number is True for expressions with no free symbols (even when
    unevaluated); containers and generic Basic subclasses are not numbers."""
    assert Float(3.14).is_number is True
    assert Integer(737).is_number is True
    assert Rational(3, 2).is_number is True
    assert Rational(8).is_number is True
    assert x.is_number is False
    assert (2*x).is_number is False
    assert (x + y).is_number is False
    assert log(2).is_number is True
    assert log(x).is_number is False
    assert (2 + log(2)).is_number is True
    assert (8 + log(2)).is_number is True
    assert (2 + log(x)).is_number is False
    assert (8 + log(2) + x).is_number is False
    assert (1 + x**2/x - x).is_number is True
    assert Tuple(Integer(1)).is_number is False
    assert Add(2, x).is_number is False
    assert Mul(3, 4).is_number is True
    assert Pow(log(2), 2).is_number is True
    assert oo.is_number is True
    g = WildFunction('g')
    assert g.is_number is False
    assert (2*g).is_number is False
    assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subinstances of Basic
    class A(Basic):
        pass
    a = A()
    assert a.is_number is False
def test_as_coeff_add():
    """as_coeff_add splits off the Rational term; Floats are not pulled
    out, and no expansion is performed."""
    assert S(2).as_coeff_add() == (2, ())
    assert S(3.0).as_coeff_add() == (0, (S(3.0),))
    assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
    assert x.as_coeff_add() == (0, (x,))
    assert (x - 1).as_coeff_add() == (-1, (x,))
    assert (x + 1).as_coeff_add() == (1, (x,))
    assert (x + 2).as_coeff_add() == (2, (x,))
    assert (x + y).as_coeff_add(y) == (x, (y,))
    assert (3*x).as_coeff_add(y) == (3*x, ())
    # don't do expansion
    e = (x + y)**2
    assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
    """as_coeff_mul splits off the Rational factor (sign only, for Floats)
    unless rational=False; no expansion is performed."""
    assert S(2).as_coeff_mul() == (2, ())
    assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
    assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
    assert S(-3.0).as_coeff_mul(rational=False) == (-S(3.0), ())
    assert x.as_coeff_mul() == (1, (x,))
    assert (-x).as_coeff_mul() == (-1, (x,))
    assert (2*x).as_coeff_mul() == (2, (x,))
    assert (x*y).as_coeff_mul(y) == (x, (y,))
    assert (3 + x).as_coeff_mul() == (1, (3 + x,))
    assert (3 + x).as_coeff_mul(y) == (3 + x, ())
    # don't do expansion
    e = exp(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    e = 2**(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    assert (1.1*x).as_coeff_mul(rational=False) == (1.1, (x,))
    assert (1.1*x).as_coeff_mul() == (1, (1.1, x))
    assert (-oo*x).as_coeff_mul(rational=True) == (-1, (oo, x))
def test_as_coeff_exponent():
    """as_coeff_exponent(x) returns (coefficient, power-of-x), with
    exponent 0 when the expression does not factor as c*x**e."""
    assert (3*x**4).as_coeff_exponent(x) == (3, 4)
    assert (2*x**3).as_coeff_exponent(x) == (2, 3)
    assert (4*x**2).as_coeff_exponent(x) == (4, 2)
    assert (6*x**1).as_coeff_exponent(x) == (6, 1)
    assert (3*x**0).as_coeff_exponent(x) == (3, 0)
    assert (2*x**0).as_coeff_exponent(x) == (2, 0)
    assert (1*x**0).as_coeff_exponent(x) == (1, 0)
    assert (0*x**0).as_coeff_exponent(x) == (0, 0)
    assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
    assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
    assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
    assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
        (log(2)/(2 + pi), 0)
    # issue 4784
    D = Derivative
    f = Function('f')
    fx = D(f(x), x)
    assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
    """Check extract_multiplicatively, extract_additively (both return
    None when extraction is impossible) and could_extract_minus_sign."""
    assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
    assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
    assert (2*x).extract_multiplicatively(2) == x
    assert (2*x).extract_multiplicatively(3) is None
    assert (2*x).extract_multiplicatively(-1) is None
    assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
    assert (sqrt(x)).extract_multiplicatively(x) is None
    assert (sqrt(x)).extract_multiplicatively(1/x) is None
    assert x.extract_multiplicatively(-x) is None
    assert ((x*y)**3).extract_additively(1) is None
    assert (x + 1).extract_additively(x) == 1
    assert (x + 1).extract_additively(2*x) is None
    assert (x + 1).extract_additively(-x) is None
    assert (-x + 1).extract_additively(2*x) is None
    assert (2*x + 3).extract_additively(x) == x + 3
    assert (2*x + 3).extract_additively(2) == 2*x + 1
    assert (2*x + 3).extract_additively(3) == 2*x
    assert (2*x + 3).extract_additively(-2) is None
    assert (2*x + 3).extract_additively(3*x) is None
    assert (2*x + 3).extract_additively(2*x) == 3
    assert x.extract_additively(0) == x
    assert S(2).extract_additively(x) is None
    assert S(2.).extract_additively(2) == S.Zero
    assert S(2*x + 3).extract_additively(x + 1) == x + 2
    assert S(2*x + 3).extract_additively(y + 1) is None
    assert S(2*x - 3).extract_additively(x + 1) is None
    assert S(2*x - 3).extract_additively(y + z) is None
    assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
        4*a*x + 3*x + y
    assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
        4*a*x + 3*x + y
    assert (y*(x + 1)).extract_additively(x + 1) is None
    assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
        y*(x + 1) + 3
    assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
        x*(x + y) + 3
    assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
        x + y + (x + 1)*(x + y) + 3
    assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
        (x + 2*y)*(y + 1) + 3
    n = Symbol("n", integer=True)
    assert (Integer(-3)).could_extract_minus_sign() is True
    assert (-n*x + x).could_extract_minus_sign() != \
        (n*x - x).could_extract_minus_sign()
    assert (x - y).could_extract_minus_sign() != \
        (-x + y).could_extract_minus_sign()
    assert (1 - x - y).could_extract_minus_sign() is True
    assert (1 - x + y).could_extract_minus_sign() is False
    assert ((-x - x*y)/y).could_extract_minus_sign() is True
    assert (-(x + x*y)/y).could_extract_minus_sign() is True
    assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
    assert ((x + x*y)/y).could_extract_minus_sign() is False
    assert (x*(-x - x**3)).could_extract_minus_sign() is True
    assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
    # The results of each of these will vary on different machines, e.g.
    # the first one might be False and the other (then) is true or vice versa,
    # so both are included.
    assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
        ((-x - y)/(y - x)).could_extract_minus_sign() is False
    assert (x - y).could_extract_minus_sign() is False
    assert (-x + y).could_extract_minus_sign() is True
def test_coeff():
    """Check Expr.coeff: literal/symbol/power targets, the order argument,
    and left/right coefficient extraction for noncommutative products."""
    assert (x + 1).coeff(x + 1) == 1
    assert (3*x).coeff(0) == 0
    assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
    assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
    assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (3 + 2*x + 4*x**2).coeff(-1) == 0
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
    assert (-x/8 + x*y).coeff(-x) == S(1)/8
    assert (4*x).coeff(2*x) == 0
    assert (2*x).coeff(2*x) == 1
    assert (-oo*x).coeff(x*oo) == -1
    assert (10*x).coeff(x, 0) == 0
    assert (10*x).coeff(10*x, 0) == 0
    n1, n2 = symbols('n1 n2', commutative=False)
    assert (n1*n2).coeff(n1) == 1
    assert (n1*n2).coeff(n2) == n1
    assert (n1*n2 + x*n1).coeff(n1) == 1  # 1*n1*(n2+x)
    assert (n2*n1 + x*n1).coeff(n1) == n2 + x
    assert (n2*n1 + x*n1**2).coeff(n1) == n2
    assert (n1**x).coeff(n1) == 0
    assert (n1*n2 + n2*n1).coeff(n1) == 0
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
    f = Function('f')
    assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr.coeff(x + y) == 0
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
    assert (x + y + 3*z).coeff(1) == x + y
    assert (-x + 2*y).coeff(-1) == x
    assert (x - 2*y).coeff(-1) == 2*y
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (-x - 2*y).coeff(2) == -y
    assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (z*(x + y)**2).coeff((x + y)**2) == z
    assert (z*(x + y)**2).coeff(x + y) == 0
    assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
    assert (x + 2*y + 3).coeff(1) == x
    assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
    assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
    assert x.coeff(0, 0) == 0
    assert x.coeff(x, 0) == 0
    n, m, o, l = symbols('n m o l', commutative=False)
    assert n.coeff(n) == 1
    assert y.coeff(n) == 0
    assert (3*n).coeff(n) == 3
    assert (2 + n).coeff(x*m) == 0
    assert (2*x*n*m).coeff(x) == 2*n*m
    assert (2 + n).coeff(x*m*n + y) == 0
    assert (2*x*n*m).coeff(3*n) == 0
    assert (n*m + m*n*m).coeff(n) == 1 + m
    assert (n*m + m*n*m).coeff(n, right=True) == m  # = (1 + m)*n*m
    assert (n*m + m*n).coeff(n) == 0
    assert (n*m + o*m*n).coeff(m*n) == o
    assert (n*m + o*m*n).coeff(m*n, right=1) == 1
    assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n  # = n*m*(n + 1)
def test_coeff2():
    """coeff() of a first derivative inside an expanded expression."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff((psi(r).diff(r))) == 2/r
def test_coeff2_0():
    """coeff() of a second derivative inside an expanded expression."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2))
    g = g.expand()
    assert g.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
    """coeff() does not expand its target before matching."""
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
    """Expr.integrate supports both indefinite and definite forms."""
    assert x.integrate(x) == x**2/2
    assert x.integrate((x, 0, 1)) == S(1)/2
def test_as_base_exp():
    """as_base_exp returns (expr, 1) for non-powers and splits Pow."""
    assert x.as_base_exp() == (x, S.One)
    assert (x*y*z).as_base_exp() == (x*y*z, S.One)
    assert (x + y + z).as_base_exp() == (x + y + z, S.One)
    assert ((x + y)**z).as_base_exp() == (x + y, z)
def test_issue_4963():
    """is_commutative must exist even on unevaluated Mul/Pow nodes."""
    assert hasattr(Mul(x, y), "is_commutative")
    assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
    assert hasattr(Pow(x, y), "is_commutative")
    assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
    expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
    assert hasattr(expr, "is_commutative")
def test_action_verbs():
    """Each simplification function has an equivalent Expr method
    ("action verb") that gives the same result."""
    assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
        (1/(exp(3*pi*x/5) + 1)).nsimplify()
    assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
    assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
    assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
    assert powsimp(x**y*x**z*y**z, combine='all') == \
        (x**y*x**z*y**z).powsimp(combine='all')
    assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
    assert together(1/x + 1/y) == (1/x + 1/y).together()
    assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
        (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
    assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
    assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
    assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
    assert refine(sqrt(x**2)) == sqrt(x**2).refine()
    assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
    """as_powers_dict maps base -> exponent; missing bases default to 0."""
    assert x.as_powers_dict() == {x: 1}
    assert (x**y*z).as_powers_dict() == {x: y, z: 1}
    assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
    assert (x*y).as_powers_dict()[z] == 0
    assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
    """as_coefficients_dict maps term -> coefficient; missing terms give
    0, and like terms are collected."""
    check = [S(1), x, y, x*y, 1]
    assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
        [3, 5, 1, 0, 3]
    assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3, 0]
    assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1
def test_args_cnc():
    """args_cnc splits args into [commutative, noncommutative]; cset=True
    returns a set (raising on repeats unless warn=False)."""
    A = symbols('A', commutative=False)
    assert (x + A).args_cnc() == \
        [[], [x + A]]
    assert (x + a).args_cnc() == \
        [[a + x], []]
    assert (x*a).args_cnc() == \
        [[a, x], []]
    assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
        [set([x, y]), [A, 1 + A]]
    assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
        [set([x]), []]
    assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
        [set([x, x**2]), []]
    raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
    assert Mul(x, y, x, evaluate=False).args_cnc() == \
        [[x, y, x], []]
    # always split -1 from leading number
    assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
    """_new_rawargs rebuilds an Add/Mul from given args, re-deriving
    commutativity unless reeval=False; a lone identity collapses."""
    n = Symbol('n', commutative=False)
    a = x + n
    assert a.is_commutative is False
    assert a._new_rawargs(x).is_commutative
    assert a._new_rawargs(x, y).is_commutative
    assert a._new_rawargs(x, n).is_commutative is False
    assert a._new_rawargs(x, y, n).is_commutative is False
    m = x*n
    assert m.is_commutative is False
    assert m._new_rawargs(x).is_commutative
    assert m._new_rawargs(n).is_commutative is False
    assert m._new_rawargs(x, y).is_commutative
    assert m._new_rawargs(x, n).is_commutative is False
    assert m._new_rawargs(x, y, n).is_commutative is False
    assert m._new_rawargs(x, n, reeval=False).is_commutative is False
    assert m._new_rawargs(S.One) is S.One
def test_issue_5226():
    """Empty Add/Mul give their identities; evaluate=False keeps the
    node type even when evaluation would collapse it."""
    assert Add(evaluate=False) == 0
    assert Mul(evaluate=False) == 1
    assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
    # free_symbols should return the free symbols of an object
    """Integration variables are bound, so only limit symbols are free;
    units carry no free symbols of their own."""
    assert S(1).free_symbols == set()
    assert (x).free_symbols == set([x])
    assert Integral(x, (x, 1, y)).free_symbols == set([y])
    assert (-Integral(x, (x, 1, y))).free_symbols == set([y])
    assert meter.free_symbols == set()
    assert (meter**x).free_symbols == set([x])
def test_issue_5300():
    """Rational radical coefficients simplify even with a
    noncommutative symbol present."""
    x = Symbol('x', commutative=False)
    assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3
def test_as_coeff_Mul():
    """as_coeff_Mul splits a leading Number factor from the rest."""
    assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
    assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
    assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
    assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
    assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
    assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
    assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
    assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
    assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
    assert (x).as_coeff_Mul() == (S.One, x)
    assert (x*y).as_coeff_Mul() == (S.One, x*y)
    assert (-oo*x).as_coeff_Mul(rational=True) == (-1, oo*x)
def test_as_coeff_Add():
    """as_coeff_Add splits a leading Number term from the rest."""
    assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
    assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
    assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
    assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
    assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
    assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
    assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
    assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
    assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
    assert (x).as_coeff_Add() == (S.Zero, x)
    assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
    """default_sort_key orders each list below as written — sorting must
    be a no-op; this covers expressions, containers, dicts and Dummies."""
    f, g = symbols('f,g', cls=Function)
    exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
             sin(x**2), cos(x), cos(x**2), tan(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[3], [1, 2]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [1, 2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{x: -y}, {x: y}]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [set([1]), set([1, 2])]
    assert sorted(exprs, key=default_sort_key) == exprs
    a, b = exprs = [Dummy('x'), Dummy('x')]
    assert sorted([b, a], key=default_sort_key) == exprs
def test_as_ordered_factors():
    """as_ordered_factors yields factors in canonical order; the order of
    noncommutative factors is preserved as given."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_factors() == [x]
    assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
        == [Integer(2), x, x**n, sin(x), cos(x)]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Mul(*args)
    assert expr.as_ordered_factors() == args
    A, B = symbols('A,B', commutative=False)
    assert (A*B).as_ordered_factors() == [A, B]
    assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
    """as_ordered_terms yields Add terms in canonical order, including
    complex constants and the lex/grlex/rev-* monomial orders."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_terms() == [x]
    assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
        == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Add(*args)
    assert expr.as_ordered_terms() == args
    assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
    assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
    assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
    assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
    assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
    assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
    assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
    assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
    assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
    f = x**2*y**2 + x*y**4 + y + 2
    assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
    assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
    assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
    assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
def test_sort_key_atomic_expr():
    """sort_key works on atomic unit expressions."""
    from sympy.physics.units import m, s
    assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_issue_4199():
    """_eval_interval: NaN propagation for indeterminate limits, the
    None-bounds error, and antisymmetry when the bounds are swapped."""
    # first subs and limit gives NaN
    a = x/y
    assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
    # second subs and limit gives NaN
    assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
    # difference gives S.NaN
    a = x - y
    assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
    raises(ValueError, lambda: x._eval_interval(x, None, None))
    a = -y*Heaviside(x - y)
    assert a._eval_interval(x, -oo, oo) == -y
    assert a._eval_interval(x, oo, -oo) == y
def test_primitive():
    """primitive() extracts the positive rational content; Float
    coefficients keep content 1 unless a Rational term forces a split."""
    assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
    assert (6*x + 2).primitive() == (2, 3*x + 1)
    assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
    eq = (6*x + 2)*(x/2 + 3)
    assert eq.primitive()[0] == 1
    eq = (2 + 2*x)**2
    assert eq.primitive()[0] == 1
    assert (4.0*x).primitive() == (1, 4.0*x)
    assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
    assert (-2*x).primitive() == (2, -x)
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
        (S(1)/14, 7.0*x + 21*y + 10*z)
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).primitive() == \
            (S(1)/3, i + x)
    assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
        (S(1)/21, 14*x + 12*y + oo)
    assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_5843():
    """extract_multiplicatively works on multiples of a compound base."""
    a = 1 + x
    assert (2*a).extract_multiplicatively(a) == 2
    assert (4*a).extract_multiplicatively(2*a) == 2
    assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
    """Check Expr.is_constant: Sums with free limits, trig identities,
    unevaluated Pows, zero-assumption symbols and units.

    Bug fix: the first six checks were bare expressions (``... is True``)
    with no ``assert``, so their results were computed and silently
    discarded — the test could never fail on them.  The ``is True``/``is
    False`` values written by the original author are the expected
    results, so the fix is simply to assert them.  A duplicated
    ``checksol`` assertion is also dropped.
    """
    from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    # x is a bound (dummy) variable of the Sum, so the Sum is constant in x
    assert Sum(x, (x, 1, n)).is_constant(x) is True
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    # identically zero, so constant
    assert eq.is_constant() is True
    assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
    assert x.is_constant() is False
    assert x.is_constant(y) is True
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
    f = Function('f')
    assert checksol(x, x, f(x)) is False
    p = symbols('p', positive=True)
    assert Pow(x, S(0), evaluate=False).is_constant() is True  # == 1
    assert Pow(S(0), x, evaluate=False).is_constant() is False  # == 0 or 1
    assert Pow(S(0), p, evaluate=False).is_constant() is True  # == 1
    assert (2**x).is_constant() is False
    assert Pow(S(2), S(3), evaluate=False).is_constant() is True
    z1, z2 = symbols('z1 z2', zero=True)
    assert (z1 + 2*z2).is_constant() is True
    assert meter.is_constant() is True
    assert (3*meter).is_constant() is True
    assert (x*meter).is_constant() is False
def test_equals():
    """Tests for ``Expr.equals``: structural-vs-mathematical equality."""
    # algebraic zeros / identities
    assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
    assert (x**2 - 1).equals((x + 1)*(x - 1))
    assert (cos(x)**2 + sin(x)**2).equals(1)
    assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
    r = sqrt(2)
    assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
    assert factorial(x + 1).equals((x + 1)*factorial(x))
    # definite non-equalities must return False, not None
    assert sqrt(3).equals(2*sqrt(3)) is False
    assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
    assert (sqrt(5) + sqrt(3)).equals(0) is False
    assert (sqrt(5) + pi).equals(0) is False
    assert meter.equals(0) is False
    assert (3*meter**2).equals(0) is False
    eq = -(-1)**(S(3)/4)*6**(S(1)/4) + (-6)**(S(1)/4)*I
    if eq != 0:  # if canonicalization makes this zero, skip the test
        assert eq.equals(0)
    assert sqrt(x).equals(0) is False
    # from integrate(x*sqrt(1 + 2*x), x);
    # diff is zero only when assumptions allow
    i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
        2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
    ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
    diff = i - ans
    assert diff.equals(0) is False
    assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120
    # there are regions for x for which the expression is True, for
    # example, when x < -1/2 or x > 0 the expression is zero
    p = Symbol('p', positive=True)
    assert diff.subs(x, p).equals(0) is True
    assert diff.subs(x, -1).equals(0) is True
    # prove via minimal_polynomial or self-consistency
    eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
    assert eq.equals(0)
    q = 3**Rational(1, 3) + 3
    p = expand(q**3)**Rational(1, 3)
    assert (p - q).equals(0)
    # issue 6829
    # eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S(1)/3
    # z = eq.subs(x, solve(eq, x)[0])
    q = symbols('q')
    # z is the (huge) closed-form root of the quartic above substituted
    # back into the quartic; it must simplify to zero
    z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4) + q/4 + (-sqrt(-2*(-(q
        - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q
        - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**2 - S(1)/3)
    assert z.equals(0)
def test_random():
    """``_random`` yields a sample for posified symbols, None otherwise."""
    from sympy import posify, lucas
    positive_x = posify(x)[0]
    assert positive_x._random() is not None
    assert lucas(n)._random(2, -2, 0, -1, 1) is None
def test_round():
    """Tests for ``Expr.round``: rounding of Rationals, Floats, big ints,
    irrational sums, complex values, and non-numeric edge cases."""
    from sympy.abc import x
    assert Float('0.1249999').round(2) == 0.12
    # 20-digit integer: rounding returns a Float that still compares equal
    d20 = 12345678901234567890
    ans = S(d20).round(2)
    assert ans.is_Float and ans == d20
    ans = S(d20).round(-2)
    assert ans.is_Float and ans == 12345678901234567900
    # rationals and repeating decimals
    assert S('1/7').round(4) == 0.1429
    assert S('.[12345]').round(4) == 0.1235
    assert S('.1349').round(2) == 0.13
    n = S(12345)
    ans = n.round()
    assert ans.is_Float
    assert ans == n
    ans = n.round(1)
    assert ans.is_Float
    assert ans == n
    ans = n.round(4)
    assert ans.is_Float
    assert ans == n
    assert n.round(-1) == 12350
    r = n.round(-4)
    assert r == 10000
    # in fact, it should equal many values since __eq__
    # compares at equal precision
    assert all(r == i for i in range(9984, 10049))
    assert n.round(-5) == 0
    assert (pi + sqrt(2)).round(2) == 4.56
    assert (10*(pi + sqrt(2))).round(-1) == 50
    # rounding a symbolic expression is a TypeError
    raises(TypeError, lambda: round(x + 2, 2))
    assert S(2.3).round(1) == 2.3
    # result agrees with Python's builtin round and is a Float
    e = S(12.345).round(2)
    assert e == round(12.345, 2)
    assert type(e) is Float
    assert (Float(.3, 3) + 2*pi).round() == 7
    assert (Float(.3, 3) + 2*pi*100).round() == 629
    assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283
    assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928
    assert (pi + 2*E*I).round() == 3 + 5*I
    assert S.Zero.round() == 0
    # low-precision addend limits how many digits survive rounding
    a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0))
    assert a.round(10) == Float('3.0000000000', '')
    assert a.round(25) == Float('3.0000000000000000000000000', '')
    assert a.round(26) == Float('3.00000000000000000000000000', '')
    assert a.round(27) == Float('2.999999999999999999999999999', '')
    assert a.round(30) == Float('2.999999999999999999999999999', '')
    raises(TypeError, lambda: x.round())
    # exact magnitude of 10
    assert str(S(1).round()) == '1.'
    assert str(S(100).round()) == '100.'
    # applied to real and imaginary portions
    assert (2*pi + E*I).round() == 6 + 3*I
    assert (2*pi + I/10).round() == 6
    assert (pi/10 + 2*I).round() == 2*I
    # the lhs re and im parts are Float with dps of 2
    # and those on the right have dps of 15 so they won't compare
    # equal unless we use string or compare components (which will
    # then coerce the floats to the same precision) or re-create
    # the floats
    assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
    assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72)
    assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3)
    # issue 6914
    assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
    # issue 7961
    assert str(S(0.006).round(2)) == '0.01'
    assert str(S(0.00106).round(4)) == '0.0011'
    # issue 8147
    assert S.NaN.round() == S.NaN
    assert S.Infinity.round() == S.Infinity
    assert S.NegativeInfinity.round() == S.NegativeInfinity
    assert S.ComplexInfinity.round() == S.ComplexInfinity
def test_round_exception_nostr():
    # The TypeError raised by round() must not embed the stringified
    # expression (stringifying can be very slow for large expressions).
    s = Symbol('bad')
    raised = False
    try:
        s.round()
    except TypeError as exc:
        raised = True
        assert 'bad' not in str(exc)
    if not raised:
        raise AssertionError("Did not raise")
def test_extract_branch_factor():
    """A full 2*pi*I polar turn extracts to branch factor (1, 1)."""
    result = exp_polar(2.0*I*pi).extract_branch_factor()
    assert result == (1, 1)
def test_identity_removal():
assert Add.make_args(x + 0) == (x,)
assert Mul.make_args(x*1) == (x,)
def test_float_0():
    """Float zero plus an Integer compares equal to the Float sum."""
    total = Float(0.0) + 1
    assert total == Float(1.0)
@XFAIL
def test_float_0_fail():
    # Known failure: multiplying/adding Float(0.0) does not currently
    # keep the Float-ness of the zero through these operations.
    assert Float(0.0)*x == Float(0.0)
    assert (x + Float(0.0)).is_Add
def test_issue_6325():
    """Second derivative of sqrt((a+b*t)**2 + (c+z*t)**2) simplifies
    to the expected closed form (issue 6325)."""
    ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
        (a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
    e = sqrt((a + b*t)**2 + (c + z*t)**2)
    assert diff(e, t, 2) == ans
    # BUGFIX: this was a bare comparison (a no-op); assert so the
    # method form of diff is actually checked too.
    assert e.diff(t, 2) == ans
    # without simplification the raw derivative differs from ans
    assert diff(e, t, 2, simplify=False) != ans
def test_issue_7426():
    """Mod expressions over different symbols must not compare equal."""
    f1 = a % c
    f2 = x % z
    # use ``is False`` (PEP 8): equals() returns True/False/None, and
    # identity makes the intent explicit; == False behaves the same here
    assert f1.equals(f2) is False
|
bsd-3-clause
|
gh1026/linux-3.4
|
Documentation/networking/cxacru-cf.py
|
14668
|
1626
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# Read packed little-endian u32 values from stdin and emit them as a
# space-separated "index=value" list (indices in hex) for sysfs.
index = 0
while True:
    chunk = sys.stdin.read(4)
    if not chunk:
        break
    if len(chunk) != 4:
        # trailing partial word: terminate the output line and fail
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(chunk)))
        sys.exit(1)
    if index > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, struct.unpack("<I", chunk)[0]))
    index += 1
sys.stdout.write("\n")
|
gpl-2.0
|
marvin-jens/clip_analysis
|
scorethresh.py
|
1
|
1049
|
#!/usr/bin/env python
# Simple script to filter lines of the input for meeting a numeric
# threshold on one, or one of multiple (comma-separated) column numbers.
# If threshold is required on each column, you can chain multiple
# scorethresh.py calls with pipes.
# by default, the column value has to be larger or equal to the threshold.
# If the column number is negative, this changes to less or equal.
import sys
# Column spec (argv[1]): 1-based, comma-separated column numbers.
# A negative column number flips the comparison from >= to <=.
cols = [int(col) for col in sys.argv[1].split(",")]
# +1 keeps the >= comparison, -1 flips it to <= (via sign multiplication).
flags = [(col >= 0)*2 -1 for col in cols]
# convert to 0-based indices
cols = [abs(c)-1 for c in cols]
# Thresholds (argv[2]): replicated len(cols) times so a single threshold
# can apply to every column; zip below truncates any excess.
threshs = [float(th) for th in sys.argv[2].split(",")]*len(cols)
for line in sys.stdin:
    # comment lines pass through unchanged
    if line.startswith("#"):
        print line,
        continue
    # pad with a 0 so an index one past the last field still resolves —
    # presumably to tolerate rows with a missing trailing column; verify
    data = line.split("\t") + [0,]
    values = [data[col] for col in cols]
    valid = False
    for value,flag,thresh in zip(values,flags,threshs):
        try:
            value = float(value)
        except ValueError:
            # non-numeric field: this column cannot satisfy the threshold
            continue
        # multiplying both sides by flag (+1/-1) implements >= or <=
        if value * flag >= thresh * flag:
            valid = True
    # keep the line if ANY listed column met its threshold
    if valid:
        print line,
|
gpl-3.0
|
pap/rethinkdb
|
test/rql_test/connections/http_support/werkzeug/wsgi.py
|
146
|
37745
|
# -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import posixpath
import mimetypes
from itertools import chain
from zlib import adler32
from time import time, mktime
from datetime import datetime
from functools import partial, update_wrapper
from werkzeug._compat import iteritems, text_type, string_types, \
implements_iterator, make_literal_wrapper, to_unicode, to_bytes, \
wsgi_get_bytes, try_coerce_native, PY2
from werkzeug._internal import _empty_stream, _encode_idna
from werkzeug.http import is_resource_modified, http_date
from werkzeug.urls import uri_to_iri, url_quote, url_parse, url_join
def responder(f):
    """Decorator that turns a function into a WSGI responder: the value
    returned by ``f`` is itself invoked as a WSGI application with the
    same ``environ``/``start_response`` pair.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """
    def wrapper(*args):
        # call f, then call its result with the trailing two WSGI args
        return f(*args)(*args[-2:])
    return update_wrapper(wrapper, f)
def get_current_url(environ, root_only=False, strip_querystring=False,
                    host_only=False, trusted_hosts=None):
    """Recreate the full URL of the current request (or parts of it) as
    an IRI.

    >>> from werkzeug.test import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    If ``trusted_hosts`` is given the host is validated and a
    :exc:`~werkzeug.exceptions.SecurityError` is raised for untrusted
    hosts. The result is an IRI and may contain unicode characters; use
    :func:`~werkzeug.urls.iri_to_uri` for an ASCII-only URI.

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    parts = [environ['wsgi.url_scheme'], '://',
             get_host(environ, trusted_hosts)]
    if host_only:
        return uri_to_iri(''.join(parts) + '/')
    script = url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', '')))
    parts.append(script.rstrip('/'))
    parts.append('/')
    if not root_only:
        parts.append(url_quote(
            wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
        if not strip_querystring:
            query = get_query_string(environ)
            if query:
                parts.append('?' + query)
    return uri_to_iri(''.join(parts))
def host_is_trusted(hostname, trusted_list):
    """Check a host against a list of trusted hosts, normalizing ports
    and honoring leading-dot subdomain wildcards.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against. If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(name):
        # drop any explicit port before IDNA-encoding
        if ':' in name:
            name = name.rsplit(':', 1)[0]
        return _encode_idna(name)

    hostname = _normalize(hostname)
    for ref in trusted_list:
        match_subdomains = ref.startswith('.')
        if match_subdomains:
            ref = ref[1:]
        ref = _normalize(ref)
        if ref == hostname:
            return True
        if match_subdomains and hostname.endswith('.' + ref):
            return True
    return False
def get_host(environ, trusted_hosts=None):
    """Return the real host for the given WSGI environment, honoring the
    ``X-Forwarded-Host`` header. If ``trusted_hosts`` is given, the host
    is validated and a :exc:`~werkzeug.exceptions.SecurityError` is
    raised when it is not trusted.

    :param environ: the WSGI environment to get the host of.
    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
                          for more information.
    """
    if 'HTTP_X_FORWARDED_HOST' in environ:
        # first entry of the forwarded chain is the original host
        host = environ['HTTP_X_FORWARDED_HOST'].split(',')[0].strip()
    elif 'HTTP_HOST' in environ:
        host = environ['HTTP_HOST']
    else:
        host = environ['SERVER_NAME']
        # append a non-default port when reconstructing from SERVER_NAME
        scheme_port = (environ['wsgi.url_scheme'], environ['SERVER_PORT'])
        if scheme_port not in (('https', '443'), ('http', '80')):
            host += ':' + environ['SERVER_PORT']
    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        from werkzeug.exceptions import SecurityError
        raise SecurityError('Host "%s" is not trusted' % host)
    return host
def get_content_length(environ):
    """Return ``CONTENT_LENGTH`` as a non-negative integer, or ``None``
    when it is missing or not a valid integer.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    raw = environ.get('CONTENT_LENGTH')
    if raw is None:
        return None
    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None
    # negative values are clamped to zero
    return length if length > 0 else 0
def get_input_stream(environ, safe_fallback=True):
    """Return the WSGI input stream, wrapped so that it is safe to read
    without consulting the content length.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: whether to fall back to an empty stream (rather
                          than returning the raw, unbounded WSGI stream)
                          when the content length is unknown.
    """
    stream = environ['wsgi.input']
    content_length = get_content_length(environ)

    # A server that sets this extension promises the stream terminates,
    # so it can be handed out unchanged.
    if environ.get('wsgi.input_terminated'):
        return stream

    # Unknown length: either play it safe with an empty stream, or hand
    # back the raw stream (not recommended, may block forever).
    if content_length is None:
        return (_empty_stream or stream) if safe_fallback else stream

    # Known length: cap reads at the content length.
    return LimitedStream(stream, content_length)
def get_query_string(environ):
    """Return ``QUERY_STRING`` from the WSGI environment as an
    ASCII-restricted native string (handling the WSGI decoding dance on
    Python 3).

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the query string from.
    """
    raw = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
    # QUERY_STRING should already be ASCII-safe, but some browsers (IE)
    # send unicode; percent-quote whatever is left over.
    quoted = url_quote(raw, safe=':&%=+$!*\'(),')
    return try_coerce_native(quoted)
def get_path_info(environ, charset='utf-8', errors='replace'):
    """Return ``PATH_INFO`` from the WSGI environment, properly decoded
    (handling the WSGI decoding dance on Python 3). With ``charset=None``
    the raw bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path info, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
    return to_unicode(raw_path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
    """Return ``SCRIPT_NAME`` from the WSGI environment, properly decoded
    (handling the WSGI decoding dance on Python 3). With ``charset=None``
    the raw bytestring is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environment object to get the path from.
    :param charset: the charset for the path, or `None` if no
                    decoding should be performed.
    :param errors: the decoding error handling.
    """
    raw_name = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
    return to_unicode(raw_name, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
    """Remove and return the next segment of ``PATH_INFO``, pushing it
    onto ``SCRIPT_NAME``. Returns ``None`` when ``PATH_INFO`` is
    exhausted. With ``charset=None`` a bytestring is returned.

    Empty segments (``'/foo//bar``) are skipped but still pushed onto
    ``SCRIPT_NAME``:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is modified.
    """
    path = environ.get('PATH_INFO')
    if not path:
        return None

    script_name = environ.get('SCRIPT_NAME', '')

    # move any leading slashes over to SCRIPT_NAME
    stripped = path.lstrip('/')
    script_name += '/' * (len(path) - len(stripped))
    path = stripped

    if '/' in path:
        segment, remainder = path.split('/', 1)
        environ['PATH_INFO'] = '/' + remainder
        environ['SCRIPT_NAME'] = script_name + segment
        raw = wsgi_get_bytes(segment)
    else:
        # last segment: PATH_INFO becomes empty
        environ['PATH_INFO'] = ''
        environ['SCRIPT_NAME'] = script_name + path
        raw = wsgi_get_bytes(path)
    return to_unicode(raw, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
    """Return the next ``PATH_INFO`` segment without modifying the
    environment (compare :func:`pop_path_info`), or ``None`` when there
    is no ``PATH_INFO``. With ``charset=None`` a bytestring is returned.

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
       The path is now decoded and a charset and encoding
       parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    stripped = environ.get('PATH_INFO', '').lstrip('/')
    segments = stripped.split('/', 1)
    if segments:
        return to_unicode(wsgi_get_bytes(segments[0]),
                          charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
                      errors='replace', collapse_http_schemes=True):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    .. versionadded:: 0.6

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.
    """
    def _normalize_netloc(scheme, netloc):
        # strip userinfo, then drop the port when it is the scheme default
        parts = netloc.split(u'@', 1)[-1].split(u':', 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u'http' and port == u'80') or \
               (scheme == u'https' and port == u'443'):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u':' + port
        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl,
                                             root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # resolve the target relative to the base before comparing
    cur_scheme, cur_netloc, cur_path, = \
        url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u'http', u'https'):
                return None
    else:
        if not (base_scheme in (u'http', u'https') and
                base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip(u'/')
    if not cur_path.startswith(base_path):
        return None

    return u'/' + cur_path[len(base_path):].lstrip(u'/')
class SharedDataMiddleware(object):

    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.wsgi import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })

    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``.  This is pretty useful during development
    because a standalone media server is not required.  One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/shared': ('myapplication', 'shared_files')
        })

    This will then serve the ``shared_files`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web.  If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non ASCII filenames.  If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident.  We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(self, app, exports, disallow=None, cache=True,
                 cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
        self.app = app
        self.exports = {}
        self.cache = cache
        self.cache_timeout = cache_timeout
        # each export value becomes a loader callable:
        #   tuple -> package data, file path -> single file, dir -> directory
        for key, value in iteritems(exports):
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError('unknown def %r' % value)
            self.exports[key] = loader
        if disallow is not None:
            from fnmatch import fnmatch
            # NOTE(review): ``disallow`` is passed to fnmatch directly, so
            # despite the docstring this only works for a single pattern
            # string, not a list of patterns — confirm intended usage.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True

    def _opener(self, filename):
        # deferred open: returns (file object, mtime, size) when called
        return lambda: (
            open(filename, 'rb'),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename))
        )

    def get_file_loader(self, filename):
        """Return a loader that always serves the single given file."""
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        """Return a loader that serves files from package data via
        `pkg_resources`."""
        from pkg_resources import DefaultProvider, ResourceManager, \
            get_provider
        # non-filesystem providers (e.g. zipped eggs) have no real mtime,
        # so import time is used as the modification time instead
        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)

        def loader(path):
            if path is None:
                return None, None
            path = posixpath.join(package_path, path)
            if not provider.has_resource(path):
                return None, None
            basename = posixpath.basename(path)
            if filesystem_bound:
                return basename, self._opener(
                    provider.get_resource_filename(manager, path))
            return basename, lambda: (
                provider.get_resource_stream(manager, path),
                loadtime,
                0
            )
        return loader

    def get_directory_loader(self, directory):
        """Return a loader that serves files below the given directory."""
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        # weak content fingerprint: mtime + size + filename checksum
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(sys.getfilesystemencoding())
        return 'wzsdm-%d-%s-%s' % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xffffffff
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)
        if PY2:
            cleaned_path = cleaned_path.encode(sys.getfilesystemencoding())
        # sanitize the path for non unix systems
        cleaned_path = cleaned_path.strip('/')
        for sep in os.sep, os.altsep:
            if sep and sep != '/':
                cleaned_path = cleaned_path.replace(sep, '/')
        # rebuild the path, dropping empty and '..' segments (traversal guard)
        path = '/'.join([''] + [x for x in cleaned_path.split('/')
                                if x and x != '..'])
        file_loader = None
        # find the first export whose prefix matches the requested path
        for search_path, loader in iteritems(self.exports):
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith('/'):
                search_path += '/'
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path):])
                if file_loader is not None:
                    break
        # no match (or disallowed): forward to the wrapped application
        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()
        headers = [('Date', http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ('Etag', '"%s"' % etag),
                ('Cache-Control', 'max-age=%d, public' % timeout)
            ]
            # conditional request: reply 304 without a body
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response('304 Not Modified', headers)
                return []
            headers.append(('Expires', http_date(time() + timeout)))
        else:
            headers.append(('Cache-Control', 'public'))
        headers.extend((
            ('Content-Type', mime_type),
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        start_response('200 OK', headers)
        return wrap_file(environ, f)
class DispatcherMiddleware(object):

    """Combine several WSGI applications by mounting each under a path
    prefix, dispatching to the longest matching prefix::

        app = DispatcherMiddleware(app, {
            '/app2':        app2,
            '/app3':        app3
        })
    """

    def __init__(self, app, mounts=None):
        # ``app`` handles every request no mount prefix matches
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        # walk the path from the longest candidate prefix down to '',
        # moving one trailing segment per iteration into path_info
        script = environ.get('PATH_INFO', '')
        path_info = ''
        matched = False
        while '/' in script:
            if script in self.mounts:
                app = self.mounts[script]
                matched = True
                break
            script, _, tail = script.rpartition('/')
            path_info = '/%s%s' % (tail, path_info)
        if not matched:
            app = self.mounts.get(script, self.app)
        original_script_name = environ.get('SCRIPT_NAME', '')
        environ['SCRIPT_NAME'] = original_script_name + script
        environ['PATH_INFO'] = path_info
        return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):

    """Wrap an iterable so that additional cleanup callbacks run when the
    WSGI ``close`` callback fires (the WSGI spec requires middlewares and
    gateways to respect ``close`` on returned iterators)::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    A single callable may be passed instead of a list.  A closing iterator
    is not needed if the application uses response objects and finishes
    the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(self, iterable, callbacks=None):
        iterator = iter(iterable)
        self._next = partial(next, iterator)
        if callbacks is None:
            cleanups = []
        elif callable(callbacks):
            cleanups = [callbacks]
        else:
            cleanups = list(callbacks)
        # the iterable's own close (if any) must always run first
        own_close = getattr(iterator, 'close', None)
        if own_close:
            cleanups.insert(0, own_close)
        self._callbacks = cleanups

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    def close(self):
        for cleanup in self._callbacks:
            cleanup()
def wrap_file(environ, file, buffer_size=8192):
    """Wrap a file-like object for a WSGI response, preferring the
    server's own ``wsgi.file_wrapper`` and falling back to the generic
    :class:`FileWrapper`.

    .. versionadded:: 0.5

    If the file wrapper from the WSGI server is used it's important to not
    iterate over it from inside the application but to pass it through
    unchanged.  If you want to pass out a file wrapper inside a response
    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.

    More information about file wrappers are available in :pep:`333`.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """
    wrapper = environ.get('wsgi.file_wrapper', FileWrapper)
    return wrapper(file, buffer_size)
@implements_iterator
class FileWrapper(object):

    """Convert a :class:`file`-like object into an iterable that yields
    ``buffer_size``-byte blocks until the file is exhausted.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`BaseResponse` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file, buffer_size=8192):
        self.file = file
        self.buffer_size = buffer_size

    def close(self):
        # delegate when the underlying object supports closing
        file_close = getattr(self.file, 'close', None)
        if file_close is not None:
            file_close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
def _make_chunk_iter(stream, limit, buffer_size):
    """Helper for the line and chunk iter functions."""
    # A plain string/bytes object is almost certainly a programming error
    # here -- the caller meant to pass a stream or an iterator.
    if isinstance(stream, (bytes, bytearray, text_type)):
        raise TypeError('Passed a string or byte object instead of '
                        'true iterator or stream.')
    # Already an iterator (no .read): just pass through non-empty items.
    if not hasattr(stream, 'read'):
        for item in stream:
            if item:
                yield item
        return
    # A readable stream: cap it at ``limit`` bytes unless it is limited
    # already, then read fixed-size blocks until EOF.
    if limit is not None and not isinstance(stream, LimitedStream):
        stream = LimitedStream(stream, limit)
    _read = stream.read
    while True:
        block = _read(buffer_size)
        if not block:
            return
        yield block
def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.

    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.

    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.

    .. versionchanged:: 0.8
       This function now ensures that the limit was reached.

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    # Peek at the first chunk to learn whether we deal with str or bytes.
    first_item = next(_iter, '')
    if not first_item:
        return
    # make_literal_wrapper gives us literals of the matching type.
    s = make_literal_wrapper(first_item)
    empty = s('')
    cr = s('\r')
    lf = s('\n')
    crlf = s('\r\n')
    # Put the peeked chunk back in front of the iterator.
    _iter = chain((first_item,), _iter)

    def _iter_basic_lines():
        # Split each chunk into lines (keeping the line endings); a chunk's
        # trailing partial line is buffered and prepended to the next chunk.
        _join = empty.join
        buffer = []
        while 1:
            new_data = next(_iter, '')
            if not new_data:
                break
            new_buf = []
            for item in chain(buffer, new_data.splitlines(True)):
                new_buf.append(item)
                if item and item[-1:] in crlf:
                    yield _join(new_buf)
                    new_buf = []
            buffer = new_buf
        if buffer:
            yield _join(buffer)

    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024):
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.

    .. versionadded:: 0.8

    .. versionadded:: 0.9
       added support for iterators as input stream.

    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    # Peek at the first block to decide between str and bytes handling.
    first_item = next(_iter, '')
    if not first_item:
        return
    _iter = chain((first_item,), _iter)
    if isinstance(first_item, text_type):
        separator = to_unicode(separator)
        # Capturing group keeps the separator in the split result so we can
        # detect chunk boundaries below.
        _split = re.compile(r'(%s)' % re.escape(separator)).split
        _join = u''.join
    else:
        separator = to_bytes(separator)
        _split = re.compile(b'(' + re.escape(separator) + b')').split
        _join = b''.join

    # ``buffer`` carries the partial chunk left over from the previous read.
    buffer = []
    while 1:
        new_data = next(_iter, '')
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf = []
        for item in chain(buffer, chunks):
            if item == separator:
                # A full chunk has been assembled; emit it.
                yield _join(new_buf)
                new_buf = []
            else:
                new_buf.append(item)
        buffer = new_buf
    # Emit whatever trails after the final separator.
    if buffer:
        yield _join(buffer)
@implements_iterator
class LimitedStream(object):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream, limit):
        # Bind the underlying read/readline once; _pos tracks how many
        # bytes have been consumed so far.
        self._read = stream.read
        self._readline = stream.readline
        self._pos = 0
        self.limit = limit

    def __iter__(self):
        return self

    @property
    def is_exhausted(self):
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self):
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self):
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from werkzeug.exceptions import ClientDisconnected
        raise ClientDisconnected()

    def exhaust(self, chunk_size=1024 * 64):
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size
        while to_read > 0:
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size=None):
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit
        # Never read past the remaining budget.
        to_read = min(self.limit - self._pos, size)
        try:
            read = self._read(to_read)
        except (IOError, ValueError):
            return self.on_disconnect()
        # A short read here means the client went away before sending
        # everything Content-Length promised.
        if to_read and len(read) != to_read:
            return self.on_disconnect()
        self._pos += len(read)
        return read

    def readline(self, size=None):
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)
        try:
            line = self._readline(size)
        except (ValueError, IOError):
            return self.on_disconnect()
        # Asked for bytes but got none back: treat as a disconnect.
        if size and not line:
            return self.on_disconnect()
        self._pos += len(line)
        return line

    def readlines(self, size=None):
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlaying stream supports it for
        `readline`.
        """
        last_pos = self._pos
        result = []
        if size is not None:
            end = min(self.limit, last_pos + size)
        else:
            end = self.limit
        while 1:
            if size is not None:
                # NOTE(review): this adjustment looks inverted
                # (last_pos - self._pos is <= 0 after a read), so ``size``
                # may grow rather than shrink -- verify against upstream
                # werkzeug history before relying on the size argument here.
                size -= last_pos - self._pos
            if self._pos >= end:
                break
            result.append(self.readline(size))
            if size is not None:
                last_pos = self._pos
        return result

    def tell(self):
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
|
agpl-3.0
|
pku9104038/edx-platform
|
lms/djangoapps/instructor_task/views.py
|
69
|
10964
|
import json
import logging
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from celery.states import FAILURE, REVOKED, READY_STATES
from instructor_task.api_helper import (get_status_from_instructor_task,
get_updated_instructor_task)
from instructor_task.models import PROGRESS
log = logging.getLogger(__name__)
# Return status for completed tasks and tasks in progress.
# ``list(READY_STATES)`` replaces the redundant identity comprehension
# ``[state for state in READY_STATES]`` -- same contents, idiomatic copy.
STATES_WITH_STATUS = list(READY_STATES) + [PROGRESS]
def _get_instructor_task_status(task_id):
    """
    Returns status for a specific task.

    Written as an internal method here (rather than as a helper)
    so that get_task_completion_info() can be called without
    causing a circular dependency (since it's also called directly).
    """
    instructor_task = get_updated_instructor_task(task_id)
    status = get_status_from_instructor_task(instructor_task)
    # Only terminal/in-progress tasks carry a human-readable outcome.
    if instructor_task is None:
        return status
    if instructor_task.task_state in STATES_WITH_STATUS:
        succeeded, message = get_task_completion_info(instructor_task)
        status['message'] = message
        status['succeeded'] = succeeded
    return status
def instructor_task_status(request):
    """
    View method that returns the status of a course-related task or tasks.

    Status is returned as a JSON-serialized dict, wrapped as the content of a HTTPResponse.

    The task_id can be specified to this view in one of two ways:

    * by making a request containing 'task_id' as a parameter with a single value
      Returns a dict containing status information for the specified task_id

    * by making a request containing 'task_ids[]' as a parameter,
      with a list of task_id values.
      Returns a dict of dicts, with the task_id as key, and the corresponding
      dict containing status information for the specified task_id

      Task_id values that are unrecognized are skipped.

    The dict with status information for a task contains the following keys:
      'message': on complete tasks, status message reporting on final progress,
          or providing exception message if failed.  For tasks in progress,
          indicates the current progress.
      'succeeded': on complete tasks or tasks in progress, boolean value indicates if the
          task outcome was successful:  did it achieve what it set out to do.
          This is in contrast with a successful task_state, which indicates that the
          task merely completed.
      'task_id': id assigned by LMS and used by celery.
      'task_state': state of task as stored in celery's result store.
      'in_progress': boolean indicating if task is still running.
      'task_progress': dict containing progress information.  This includes:
          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.  Should be past-tense.
          'duration_ms': how long the task has (or had) been running.
          'exception': name of exception class raised in failed tasks.
          'message': returned for failed and revoked tasks.
          'traceback': optional, returned if task failed and produced a traceback.
    """
    output = {}
    params = request.REQUEST
    if 'task_id' in params:
        # Single-task form of the request.
        output = _get_instructor_task_status(params['task_id'])
    elif 'task_ids[]' in params:
        # Multi-task form: unknown task_ids produce None and are skipped.
        for task_id in params.getlist('task_ids[]'):
            task_output = _get_instructor_task_status(task_id)
            if task_output is not None:
                output[task_id] = task_output
    return HttpResponse(json.dumps(output, indent=4))
def get_task_completion_info(instructor_task):
    """
    Construct progress message from progress information in InstructorTask entry.

    Returns (boolean, message string) duple, where the boolean indicates
    whether the task completed without incident.  (It is possible for a
    task to attempt many sub-tasks, such as rescoring many students' problem
    responses, and while the task runs to completion, some of the students'
    responses could not be rescored.)

    Used for providing messages to instructor_task_status(), as well as
    external calls for providing course task submission history information.
    """
    succeeded = False

    # Tasks that are neither finished nor reporting progress have no story to tell.
    if instructor_task.task_state not in STATES_WITH_STATUS:
        return (succeeded, _("No status information available"))

    # we're more surprised if there is no output for a completed task, but just warn:
    if instructor_task.task_output is None:
        log.warning(_("No task_output information found for instructor_task {0}").format(instructor_task.task_id))
        return (succeeded, _("No status information available"))

    try:
        task_output = json.loads(instructor_task.task_output)
    except ValueError:
        fmt = _("No parsable task_output information found for instructor_task {0}: {1}")
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, _("No parsable status information available"))

    # Failed/revoked tasks store their explanation under 'message'.
    if instructor_task.task_state in [FAILURE, REVOKED]:
        return (succeeded, task_output.get('message', _('No message provided')))

    if any([key not in task_output for key in ['action_name', 'attempted', 'total']]):
        fmt = _("Invalid task_output information found for instructor_task {0}: {1}")
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_output))
        return (succeeded, _("No progress status information available"))

    action_name = _(task_output['action_name'])
    num_attempted = task_output['attempted']
    num_total = task_output['total']

    # In earlier versions of this code, the key 'updated' was used instead of
    # (the more general) 'succeeded'.  In order to support history that may contain
    # output with the old key, we check for values with both the old and the current
    # key, and simply sum them.
    num_succeeded = task_output.get('updated', 0) + task_output.get('succeeded', 0)
    num_skipped = task_output.get('skipped', 0)

    student = None
    problem_url = None
    email_id = None
    try:
        task_input = json.loads(instructor_task.task_input)
    except ValueError:
        fmt = _("No parsable task_input information found for instructor_task {0}: {1}")
        log.warning(fmt.format(instructor_task.task_id, instructor_task.task_input))
    else:
        student = task_input.get('student')
        problem_url = task_input.get('problem_url')
        email_id = task_input.get('email_id')

    # Pick a message template based on task kind (per-student problem,
    # all-student problem, bulk email, or generic) and outcome counts.
    if instructor_task.task_state == PROGRESS:
        # special message for providing progress updates:
        # Translators: {action} is a past-tense verb that is localized separately. {attempted} and {succeeded} are counts.
        msg_format = _("Progress: {action} {succeeded} of {attempted} so far")
    elif student is not None and problem_url is not None:
        # this reports on actions on problems for a particular student:
        if num_attempted == 0:
            # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier.
            msg_format = _("Unable to find submission to be {action} for student '{student}'")
        elif num_succeeded == 0:
            # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier.
            msg_format = _("Problem failed to be {action} for student '{student}'")
        else:
            succeeded = True
            # Translators: {action} is a past-tense verb that is localized separately. {student} is a student identifier.
            msg_format = _("Problem successfully {action} for student '{student}'")
    elif student is None and problem_url is not None:
        # this reports on actions on problems for all students:
        if num_attempted == 0:
            # Translators: {action} is a past-tense verb that is localized separately.
            msg_format = _("Unable to find any students with submissions to be {action}")
        elif num_succeeded == 0:
            # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
            msg_format = _("Problem failed to be {action} for any of {attempted} students")
        elif num_succeeded == num_attempted:
            succeeded = True
            # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
            msg_format = _("Problem successfully {action} for {attempted} students")
        else:  # num_succeeded < num_attempted
            # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts.
            msg_format = _("Problem {action} for {succeeded} of {attempted} students")
    elif email_id is not None:
        # this reports on actions on bulk emails
        if num_attempted == 0:
            # Translators: {action} is a past-tense verb that is localized separately.
            msg_format = _("Unable to find any recipients to be {action}")
        elif num_succeeded == 0:
            # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
            msg_format = _("Message failed to be {action} for any of {attempted} recipients ")
        elif num_succeeded == num_attempted:
            succeeded = True
            # Translators: {action} is a past-tense verb that is localized separately. {attempted} is a count.
            msg_format = _("Message successfully {action} for {attempted} recipients")
        else:  # num_succeeded < num_attempted
            # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts.
            msg_format = _("Message {action} for {succeeded} of {attempted} recipients")
    else:
        # provide a default:
        # Translators: {action} is a past-tense verb that is localized separately. {succeeded} and {attempted} are counts.
        msg_format = _("Status: {action} {succeeded} of {attempted}")

    if num_skipped > 0:
        # Translators: {skipped} is a count.  This message is appended to task progress status messages.
        msg_format += _(" (skipping {skipped})")

    if student is None and num_attempted != num_total:
        # Translators: {total} is a count.  This message is appended to task progress status messages.
        msg_format += _(" (out of {total})")

    # Update status in task result object itself:
    message = msg_format.format(
        action=action_name,
        succeeded=num_succeeded,
        attempted=num_attempted,
        total=num_total,
        skipped=num_skipped,
        student=student
    )
    return (succeeded, message)
|
agpl-3.0
|
maestrano/odoo
|
addons/stock_invoice_directly/stock_invoice_directly.py
|
337
|
2132
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import api
from openerp.osv import osv
from openerp.tools.translate import _
class stock_picking(osv.osv):
    # Extends stock.picking so that validating a transfer can immediately
    # chain into the invoice-creation wizard.
    _inherit = 'stock.picking'

    @api.cr_uid_ids_context
    def do_transfer(self, cr, uid, picking_ids, context=None):
        """Launch Create invoice wizard if invoice state is To be Invoiced,
        after processing the picking.

        Returns either the super() result unchanged, or an ir.actions.act_window
        descriptor opening the 'stock.invoice.onshipping' wizard when at least
        one processed picking is still marked '2binvoiced'.
        """
        if context is None:
            context = {}
        res = super(stock_picking, self).do_transfer(cr, uid, picking_ids, context=context)
        # Collect only the pickings that still need invoicing.
        pick_ids = [p.id for p in self.browse(cr, uid, picking_ids, context) if p.invoice_state == '2binvoiced']
        if pick_ids:
            # The wizard reads active_ids from the context to know which
            # pickings to invoice.
            context = dict(context, active_model='stock.picking', active_ids=pick_ids)
            return {
                'name': _('Create Invoice'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'stock.invoice.onshipping',
                'type': 'ir.actions.act_window',
                'target': 'new',
                'context': context
            }
        return res

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
BBN-Q/Auspex
|
src/auspex/instruments/kepco.py
|
1
|
1699
|
# Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
__all__ = ['BOP2020M']
import numpy as np

from auspex.log import logger
from .instrument import SCPIInstrument, StringCommand, RampCommand
class BOP2020M(SCPIInstrument):
    """For controlling the BOP2020M power supply via GPIB interface card."""

    output  = StringCommand(scpi_string="OUTPUT", value_map={True: '1', False: '0'})
    current = RampCommand(increment=0.1, pause=20e-3, get_string=":CURR?", set_string=":CURR:LEV:IMM {:g}", value_range=(-20,20))
    voltage = RampCommand(increment=0.1, pause=20e-3, get_string=":VOLT?", set_string=":VOLT:LEV:IMM {:g}", value_range=(-20,20))
    mode    = StringCommand(scpi_string="FUNC:MODE", value_map={'voltage': "VOLT", 'current': "CURR"})

    def __init__(self, name, resource_name, mode='current', **kwargs):
        """Connect, select the regulation *mode* ('current' or 'voltage'),
        set the voltage compliance to maximum, and enable the output.
        """
        super(BOP2020M, self).__init__(name, resource_name, **kwargs)
        self.name = "BOP2020M power supply"
        self.interface._resource.write_termination = u"\n"
        self.interface._resource.read_termination = u"\n"
        # FIX: previously hard-coded to 'current', silently ignoring the
        # ``mode`` argument; honor it (default preserves old behavior).
        self.mode = mode
        self.interface.write('VOLT MAX')
        self.output = True

    def shutdown(self):
        """Ramp current and voltage to zero in 20 steps, then disable output.

        FIX: ``np`` was referenced without numpy ever being imported in this
        module, so shutdown() raised NameError; numpy is now imported.
        """
        if self.output:
            if self.current != 0.0:
                for i in np.linspace(self.current, 0.0, 20):
                    self.current = i
            if self.voltage != 0.0:
                for v in np.linspace(self.voltage, 0.0, 20):
                    self.voltage = v
            self.output = False
|
apache-2.0
|
krafczyk/spack
|
var/spack/repos/builtin/packages/p4est/package.py
|
5
|
2340
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class P4est(AutotoolsPackage):
    """Dynamic management of a collection (a forest) of adaptive octrees in
    parallel"""

    homepage = "http://www.p4est.org"
    url      = "http://p4est.github.io/release/p4est-1.1.tar.gz"

    maintainers = ['davydden']

    # Checksums: 2.0 uses sha256, 1.1 uses the older md5 form.
    version('2.0', 'c522c5b69896aab39aa5a81399372a19a6b03fc6200d2d5d677d9a22fe31029a')
    version('1.1', '37ba7f4410958cfb38a2140339dbf64f')

    # build dependencies
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool@2.4.2:', type='build')

    # other dependencies
    depends_on('mpi')
    depends_on('zlib')

    def configure_args(self):
        """Return the ./configure flags: MPI-enabled shared build with the
        MPI compiler wrappers passed explicitly."""
        return [
            '--enable-mpi',
            '--enable-shared',
            '--disable-vtk-binary',
            '--without-blas',
            'CPPFLAGS=-DSC_LOG_PRIORITY=SC_LP_ESSENTIAL',
            'CFLAGS=-O2',
            'CC=%s' % self.spec['mpi'].mpicc,
            'CXX=%s' % self.spec['mpi'].mpicxx,
            'FC=%s' % self.spec['mpi'].mpifc,
            'F77=%s' % self.spec['mpi'].mpif77
        ]
|
lgpl-2.1
|
dischinator/pyload
|
module/plugins/crypter/ImgurCom.py
|
3
|
5831
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
from module.plugins.internal.misc import json, uniqify
class ImgurCom(SimpleCrypter):
    __name__    = "ImgurCom"
    __type__    = "crypter"
    __version__ = "0.59"
    __status__  = "testing"

    __pattern__ = r'https?://(?:www\.|m\.)?imgur\.com/(a|gallery|)/?\w{5,7}'
    __config__  = [("activated"         , "bool"          , "Activated"                                        , True     ),
                   ("use_premium"       , "bool"          , "Use premium account if available"                 , True     ),
                   ("folder_per_package", "Default;Yes;No", "Create folder for each package"                   , "Default"),
                   ("max_wait"          , "int"           , "Reconnect if waiting time is greater than minutes", 10       )]

    __description__ = """Imgur.com decrypter plugin"""
    __license__     = "GPLv3"
    __authors__     = [("nath_schwarz", "nathan.notwhite@gmail.com" ),
                       ("nippey", "matthias.nippert@gmail.com")]

    NAME_PATTERN = r'(?P<N>.+?) - .*?Imgur'
    LINK_PATTERN = r'i\.imgur\.com/\w{7}s?\.(?:jpeg|jpg|png|gif|apng)'

    """ Imgur may only show the first 10 images of a gallery. The remaining bits may be found here: """
    GALLERY_JSON = "http://imgur.com/ajaxalbums/getimages/%s/hit.json?all=true"

    def sanitize(self, name):
        """ Turn Imgur Gallery title into a safe Package and Folder name """
        # Whitelist alphanumerics plus a few separators; spaces/tabs become
        # underscores so the result is filesystem-safe.
        keepcharacters = (' ','\t','.','_')
        replacecharacters = (' ','\t')
        return "".join(c if c not in replacecharacters else '_' for c in name.strip() if c.isalnum() or c in keepcharacters).strip('_')

    def get_ids_from_json(self):
        """ Check the embedded JSON and if needed the external JSON for more images """
        # Greedy re should match the closing bracket of json assuming JSON data is placed on a single line
        m = re.search(r"\simage\s+:\s+({.*})", self.data)
        if m:
            embedded_json = json.loads(m.group(1))

            # Extract some metadata (ID, Title, NumImages)
            gallery_id = embedded_json['hash']
            self.gallery_name = self.sanitize(_("%s_%s") % (gallery_id, embedded_json['title']))
            self.total_num_images = int(embedded_json['num_images'])

            # Extract images: hash -> file extension
            images = dict([(e['hash'], e['ext']) for e in embedded_json['album_images']['images']])
            self.log_debug(_("Found %s of %s expected links in embedded JSON") % (len(images), self.total_num_images))

            # Depeding on the gallery, the embedded JSON may not contain all image information, then we also try the external JSON
            # If this doesn't help either (which is possible),... TODO: Find out what to do
            if len(images) < self.total_num_images:
                external_json = json.loads(self.load(self.GALLERY_JSON % gallery_id))
                try:
                    images = dict([(e['hash'], e['ext']) for e in external_json['data']['images']])
                    self.log_debug(_("Found %s of %s expected links in external JSON") % (len(images), self.total_num_images))
                except (KeyError, TypeError):
                    self.log_debug(_("Could not extract links from external JSON"))
                    # It is possible that the returned JSON contains an empty 'data' section. We ignore it then.
            return images

        self.log_debug(_("Could not find embedded JSON"))
        return {}

    def get_indirect_links(self, links_direct):
        """ Try to find a list of all images and add those we didn't find already """
        # Extract IDs of known direct links (7-char imgur hashes)
        ids_direct = set([l for link in links_direct for l in re.findall(r'(\w{7})', link)])

        # Get filename extensions for new IDs
        ids_json = self.get_ids_from_json()
        ids_indirect = [id for id in ids_json.keys() if id not in ids_direct]

        # No additional images found
        if len(ids_indirect) == 0 :
            return []

        # Translate new IDs to Direct-URLs
        return map(lambda id: "http://i.imgur.com/%s%s" % (id, ids_json[id]), ids_indirect)

    def setup(self):
        # Reset per-gallery state before each decrypt run.
        self.gallery_name = None
        self.total_num_images = 0

    def get_links(self):
        """ Extract embedded links from HTML // then check if there are further images which will be lazy-loaded """
        # Strip the thumbnail 's' suffix from each matched URL and prepend the scheme.
        f = lambda url: "http://" + re.sub(r'(\w{7})s\.', r'\1.', url)
        direct_links = uniqify(map(f, re.findall(self.LINK_PATTERN, self.data)))

        # Imgur Galleryies may contain more images than initially shown. Find the rest now!
        try:
            indirect_links = self.get_indirect_links(direct_links)
            self.log_debug(_("Found %s additional links") % (len(indirect_links)))
        except (TypeError, KeyError, ValueError), e:
            # Fail gracefull as we already had some success
            self.log_error(_("Processing of additional links unsuccessful - %s: %s") % (type(e).__name__, str(e)))
            indirect_links = []

        # Check if all images were found and inform the user
        num_images_found = len(direct_links) + len(indirect_links)
        if num_images_found < self.total_num_images:
            self.log_error(_("Could not save all images of this gallery: %s/%s") % (num_images_found, self.total_num_images))

        # If we could extract a name, use this to create a specific package
        if self.gallery_name:
            self.packages.append((self.gallery_name, direct_links + indirect_links, self.gallery_name))
            return []
        else:
            return direct_links + indirect_links
|
gpl-3.0
|
usc-isi/horizon-old
|
horizon/horizon/dashboards/syspanel/images/forms.py
|
2
|
3258
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from glance.common import exception as glance_exception
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class DeleteImage(forms.SelfHandlingForm):
    """Self-handling form that deletes a Glance image by id."""
    image_id = forms.CharField(required=True)

    def handle(self, request, data):
        # Errors are reported via the Django messages framework; in either
        # case the user is redirected back to the current page.
        image_id = data['image_id']
        try:
            api.image_delete(request, image_id)
        except glance_exception.ClientConnectionError, e:
            LOG.exception("Error connecting to glance")
            messages.error(request,
                           _("Error connecting to glance: %s") % e.message)
        except glance_exception.Error, e:
            LOG.exception('Error deleting image with id "%s"' % image_id)
            messages.error(request, _("Error deleting image: %s") % e.message)
        return shortcuts.redirect(request.build_absolute_uri())
class ToggleImage(forms.SelfHandlingForm):
    """Self-handling form that marks a Glance image as non-public.

    NOTE(review): despite the name, this always sets ``is_public`` to
    False rather than flipping the current value -- confirm whether a
    true toggle was intended.
    """
    image_id = forms.CharField(required=True)

    def handle(self, request, data):
        image_id = data['image_id']
        try:
            api.image_update(request, image_id,
                             image_meta={'is_public': False})
        except glance_exception.ClientConnectionError, e:
            LOG.exception("Error connecting to glance")
            messages.error(request,
                           _("Error connecting to glance: %s") % e.message)
        except glance_exception.Error, e:
            LOG.exception('Error updating image with id "%s"' % image_id)
            messages.error(request, _("Error updating image: %s") % e.message)
        return shortcuts.redirect(request.build_absolute_uri())
class UpdateImageForm(forms.Form):
    """Plain form for editing the mutable metadata of a Glance image.

    NOTE(review): ``max_length`` is passed as the string "25" rather than
    the integer 25 -- Django coerces it, but the integer form is correct.
    """
    name = forms.CharField(max_length="25", label=_("Name"))
    kernel = forms.CharField(max_length="25", label=_("Kernel ID"),
                             required=False)
    ramdisk = forms.CharField(max_length="25", label=_("Ramdisk ID"),
                              required=False)
    architecture = forms.CharField(label=_("Architecture"), required=False)
    #project_id = forms.CharField(label=_("Project ID"))
    container_format = forms.CharField(label=_("Container Format"),
                                       required=False)
    disk_format = forms.CharField(label=_("Disk Format"))
    #is_public = forms.BooleanField(label=_("Publicly Available"),
    #                               required=False)
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.