from django.db import models
from django.utils.translation import gettext_lazy as _  # ugettext_lazy was removed in Django 4.0
from enum import Enum
class BaseManager(models.Manager):
def get_or_404(self, *args, **kwargs):
"""
        Return the matching instance or raise NotFoundException (resulting in a 404).
"""
try:
instance = super(BaseManager, self).get(*args, **kwargs)
except self.model.DoesNotExist:
from .exceptions import NotFoundException
raise NotFoundException
return instance
class BaseModel(models.Model):
created_at = models.DateTimeField(
verbose_name=_("Created at"),
auto_now_add=True,
blank=False,
null=False,
help_text=_("Model was created at this time."),
)
updated_at = models.DateTimeField(
verbose_name=_("Updated at"),
auto_now=True,
blank=False,
null=False,
help_text=_("Model was updated at this time."),
)
objects = BaseManager()
class Meta:
abstract = True
class BaseChoiceEnum(Enum):
def __str__(self):
return self.name
@classmethod
def as_choices(cls):
return [(tag.name, tag.value) for tag in cls]
@classmethod
def has_value(cls, value):
return value in cls._value2member_map_
@classmethod
def get_name_for_value(cls, value, default=None):
        member = cls._value2member_map_.get(value)
        return member.name if member is not None else default
class SerializableEnum(BaseChoiceEnum):
"""
    An Enum that is serializable.
To serialize this, use `from django_sonic_screwdriver.json import stringify_keys`.
"""
def json_repr(self):
return self.value
class ErrorCodes(Enum):
def __str__(self):
return str(self.value)
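
# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example wiring BaseChoiceEnum and BaseModel together;
# the ArticleStatus enum and Article model below are illustrative only.
class ArticleStatus(BaseChoiceEnum):
    DRAFT = "Draft"
    PUBLISHED = "Published"

class Article(BaseModel):
    status = models.CharField(
        max_length=20,
        # as_choices() -> [('DRAFT', 'Draft'), ('PUBLISHED', 'Published')]
        choices=ArticleStatus.as_choices(),
        default=ArticleStatus.DRAFT.name,
    )

# Article.objects.get_or_404(pk=1) raises NotFoundException (instead of
# Article.DoesNotExist) when no matching row exists.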
""" Module to take a water_level reading."""
try:
import ConfigParser as configparser # Python2
except ImportError:
import configparser # Python3
from hcsr04sensor import sensor
from raspisump import log, alerts, heartbeat
config = configparser.RawConfigParser()
config.read("/home/pi/raspi-sump/raspisump.conf")
configs = {
"critical_water_level": config.getint("pit", "critical_water_level"),
"pit_depth": config.getint("pit", "pit_depth"),
"temperature": config.getint("pit", "temperature"),
"trig_pin": config.getint("gpio_pins", "trig_pin"),
"echo_pin": config.getint("gpio_pins", "echo_pin"),
"unit": config.get("pit", "unit"),
}
try:
configs["alert_when"] = config.get("pit", "alert_when")
except configparser.NoOptionError:
configs["alert_when"] = "high"
try:
configs["heartbeat"] = config.getint("email", "heartbeat")
except configparser.NoOptionError:
configs["heartbeat"] = 0
def initiate_heartbeat():
"""Initiate the heartbeat email process if needed"""
    if configs["heartbeat"] == 1:
        heartbeat.determine_if_heartbeat()
def water_reading():
"""Initiate a water level reading."""
pit_depth = configs["pit_depth"]
trig_pin = configs["trig_pin"]
echo_pin = configs["echo_pin"]
temperature = configs["temperature"]
unit = configs["unit"]
value = sensor.Measurement(trig_pin, echo_pin, temperature, unit)
try:
raw_distance = value.raw_distance(sample_wait=0.3)
except SystemError:
log.log_errors(
"**ERROR - Signal not received. Possible cable or sensor problem."
)
exit(0)
return round(value.depth(raw_distance, pit_depth), 1)
def water_depth():
"""Determine the depth of the water, log result and generate alert
if needed.
"""
critical_water_level = configs["critical_water_level"]
water_depth = water_reading()
if water_depth < 0.0:
water_depth = 0.0
log.log_reading(water_depth)
    if water_depth > critical_water_level and configs["alert_when"] == "high":
        alerts.determine_if_alert(water_depth)
    elif water_depth < critical_water_level and configs["alert_when"] == "low":
        alerts.determine_if_alert(water_depth)
initiate_heartbeat()
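
# --- Usage sketch (not part of the original module) ---
# raspi-sump normally runs this on a schedule (e.g. via cron); for a one-off
# manual reading, assuming the config file above is in place:
if __name__ == "__main__":
    water_depth()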
"""Parser classes for Cheetah's Compiler
Classes:
ParseError( Exception )
_LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer
_HighLevelParser( _LowLevelParser )
Parser === _HighLevelParser (an alias)
Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
Version: $Revision: 1.130 $
Start Date: 2001/08/01
Last Revision Date: $Date: 2006/06/21 23:49:14 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.130 $"[11:-2]
import os
import sys
import re
from re import DOTALL, MULTILINE
from types import StringType, ListType, TupleType, ClassType, TypeType
import time
from tokenize import pseudoprog
import inspect
import new
import traceback
from Cheetah.SourceReader import SourceReader
from Cheetah import Filters
from Cheetah import ErrorCatchers
from Cheetah.Unspecified import Unspecified
def escapeRegexChars(txt,
escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
"""Return a txt with all special regular expressions chars escaped."""
return escapeRE.sub(r'\\\1' , txt)
def group(*choices): return '(' + '|'.join(choices) + ')'
def nongroup(*choices): return '(?:' + '|'.join(choices) + ')'
def namedGroup(name, *choices): return '(?P<' + name + '>' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
NO_CACHE = 0
STATIC_CACHE = 1
REFRESH_CACHE = 2
SET_LOCAL = 0
SET_GLOBAL = 1
SET_MODULE = 2
identchars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
namechars = identchars + "0123456789"
powerOp = '**'
unaryArithOps = ('+', '-', '~')
binaryArithOps = ('+', '-', '/', '//','%')
shiftOps = ('>>','<<')
bitwiseOps = ('&','|','^')
assignOp = '='
augAssignOps = ('+=','-=','/=','*=', '**=','^=','%=',
'>>=','<<=','&=','|=', )
assignmentOps = (assignOp,) + augAssignOps
compOps = ('<','>','==','!=','<=','>=', '<>', 'is', 'in',)
booleanOps = ('and','or','not')
operators = (powerOp,) + unaryArithOps + binaryArithOps \
+ shiftOps + bitwiseOps + assignmentOps \
+ compOps + booleanOps
delimeters = ('(',')','{','}','[',']',
',','.',':',';','=','`') + augAssignOps
keywords = ('and', 'del', 'for', 'is', 'raise',
'assert', 'elif', 'from', 'lambda', 'return',
'break', 'else', 'global', 'not', 'try',
'class', 'except', 'if', 'or', 'while',
'continue', 'exec', 'import', 'pass',
'def', 'finally', 'in', 'print',
)
single3 = "'''"
double3 = '"""'
tripleQuotedStringStarts = ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"u'''", 'u"""', "U'''", 'U"""',
"ur'''", 'ur"""', "Ur'''", 'Ur"""',
"uR'''", 'uR"""', "UR'''", 'UR"""')
tripleQuotedStringPairs = {"'''": single3, '"""': double3,
"r'''": single3, 'r"""': double3,
"u'''": single3, 'u"""': double3,
"ur'''": single3, 'ur"""': double3,
"R'''": single3, 'R"""': double3,
"U'''": single3, 'U"""': double3,
"uR'''": single3, 'uR"""': double3,
"Ur'''": single3, 'Ur"""': double3,
"UR'''": single3, 'UR"""': double3,
}
closurePairs= {')':'(',']':'[','}':'{'}
closurePairsRev= {'(':')','[':']','{':'}'}
tripleQuotedStringREs = {}
def makeTripleQuoteRe(start, end):
start = escapeRegexChars(start)
end = escapeRegexChars(end)
return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL)
for start, end in tripleQuotedStringPairs.items():
tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end)
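# For illustration (not in the original source): each generated pattern spans
# a complete triple-quoted literal, including embedded newlines, because the
# regexes are compiled with re.DOTALL, e.g.:
#
#     tripleQuotedStringREs["'''"].match("'''ab\ncd'''")   # -> match object
#     tripleQuotedStringREs['r"""'].match('r"""raw"""')    # -> match object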
WS = r'[ \f\t]*'
EOL = r'\r\n|\n|\r'
EOLZ = EOL + r'|\Z'
escCharLookBehind = nongroup(r'(?<=\A)',r'(?<!\\)')
nameCharLookAhead = r'(?=[A-Za-z_])'
identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
EOLre=re.compile(r'(?:\r\n|\r|\n)')
specialVarRE = re.compile(r'([a-zA-Z_]+)@')  # for matching specialVar comments
directiveNamesAndParsers = {
# importing and inheritance
'import':None,
'from':None,
'extends': 'eatExtends',
'implements': 'eatImplements',
# output, filtering, and caching
'slurp': 'eatSlurp',
'raw': 'eatRaw',
'include': 'eatInclude',
'cache': 'eatCache',
'filter': 'eatFilter',
'echo': None,
'silent': None,
'call': 'eatCall',
'arg': 'eatCallArg',
'capture': 'eatCapture',
# declaration, assignment, and deletion
'attr': 'eatAttr',
'def': 'eatDef',
'block': 'eatBlock',
'@': 'eatDecorator',
'defmacro': 'eatDefMacro',
'closure': 'eatClosure',
'set': 'eatSet',
'del': None,
# flow control
'if': 'eatIf',
'while': None,
'for': None,
'else': None,
'elif': None,
'pass': None,
'break': None,
'continue': None,
'stop': None,
'return': None,
'yield': None,
# little wrappers
'repeat': None,
'unless': None,
# error handling
'assert': None,
'raise': None,
'try': None,
'except': None,
'finally': None,
'errorCatcher': 'eatErrorCatcher',
    # instructions to the parser and compiler
'breakpoint': 'eatBreakPoint',
'compiler': 'eatCompiler',
'compiler-settings': 'eatCompilerSettings',
# misc
'shBang': 'eatShbang',
'encoding': 'eatEncoding',
'end': 'eatEndDirective',
}
endDirectiveNamesAndHandlers = {
'def': 'handleEndDef', # has short-form
'block': None, # has short-form
'closure': None, # has short-form
'cache': None, # has short-form
'call': None, # has short-form
'capture': None, # has short-form
'filter': None,
'errorCatcher':None,
'while': None, # has short-form
'for': None, # has short-form
'if': None, # has short-form
'try': None, # has short-form
'repeat': None, # has short-form
'unless': None, # has short-form
}
class ParseError(ValueError):
def __init__(self, stream, msg='Invalid Syntax', extMsg='', lineno=None, col=None):
self.stream = stream
if stream.pos() >= len(stream):
stream.setPos(len(stream) -1)
self.msg = msg
self.extMsg = extMsg
self.lineno = lineno
self.col = col
def __str__(self):
return self.report()
def report(self):
stream = self.stream
if stream.filename():
f = " in file %s" % stream.filename()
else:
f = ''
report = ''
if self.lineno:
lineno = self.lineno
row, col, line = (lineno, (self.col or 0),
self.stream.splitlines()[lineno-1])
else:
row, col, line = self.stream.getRowColLine()
## get the surrounding lines
lines = stream.splitlines()
prevLines = [] # (rowNum, content)
for i in range(1,4):
if row-1-i <=0:
break
prevLines.append( (row-i,lines[row-1-i]) )
nextLines = [] # (rowNum, content)
for i in range(1,4):
if not row-1+i < len(lines):
break
nextLines.append( (row+i,lines[row-1+i]) )
nextLines.reverse()
## print the main message
report += "\n\n%s\n" %self.msg
report += "Line %i, column %i%s\n\n" % (row, col, f)
report += 'Line|Cheetah Code\n'
report += '----|-------------------------------------------------------------\n'
while prevLines:
lineInfo = prevLines.pop()
report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
report += ' '*5 +' '*(col-1) + "^\n"
while nextLines:
lineInfo = nextLines.pop()
report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
## add the extra msg
if self.extMsg:
report += self.extMsg + '\n'
return report
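# For illustration (not in the original source): report() renders roughly the
# following, with up to three lines of context on either side of the error:
#
#     Invalid identifier
#     Line 12, column 8 in file foo.tmpl
#
#     Line|Cheetah Code
#     ----|-------------------------------------------------------------
#     11  |#def title
#     12  |$(!bad)
#                ^
#     13  |#end def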
class ForbiddenSyntax(ParseError): pass
class ForbiddenExpression(ForbiddenSyntax): pass
class ForbiddenDirective(ForbiddenSyntax): pass
class CheetahVariable:
def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
rawSource=None):
self.nameChunks = nameChunks
self.useNameMapper = useNameMapper
self.cacheToken = cacheToken
self.rawSource = rawSource
class Placeholder(CheetahVariable): pass
class ArgList:
"""Used by _LowLevelParser.getArgList()"""
def __init__(self):
self.argNames = []
self.defVals = []
self.i = 0
def addArgName(self, name):
self.argNames.append( name )
self.defVals.append( None )
def next(self):
self.i += 1
def addToDefVal(self, token):
i = self.i
        if self.defVals[i] is None:
self.defVals[i] = ''
self.defVals[i] += token
def merge(self):
defVals = self.defVals
for i in range(len(defVals)):
if type(defVals[i]) == StringType:
defVals[i] = defVals[i].strip()
return map(None, [i.strip() for i in self.argNames], defVals)
def __str__(self):
return str(self.merge())
class _LowLevelParser(SourceReader):
"""This class implements the methods to match or extract ('get*') the basic
elements of Cheetah's grammar. It does NOT handle any code generation or
state management.
"""
_settingsManager = None
def setSettingsManager(self, settingsManager):
self._settingsManager = settingsManager
def setting(self, key, default=Unspecified):
if default is Unspecified:
return self._settingsManager.setting(key)
else:
return self._settingsManager.setting(key, default=default)
def setSetting(self, key, val):
self._settingsManager.setSetting(key, val)
def settings(self):
return self._settingsManager.settings()
def updateSettings(self, settings):
self._settingsManager.updateSettings(settings)
def _initializeSettings(self):
self._settingsManager._initializeSettings()
def configureParser(self):
"""Is called by the Compiler instance after the parser has had a
settingsManager assigned with self.setSettingsManager()
"""
self._makeCheetahVarREs()
self._makeCommentREs()
self._makeDirectiveREs()
self._makePspREs()
self._possibleNonStrConstantChars = (
self.setting('commentStartToken')[0] +
self.setting('multiLineCommentStartToken')[0] +
self.setting('cheetahVarStartToken')[0] +
self.setting('directiveStartToken')[0] +
self.setting('PSPStartToken')[0])
self._nonStrConstMatchers = [
self.matchCommentStartToken,
self.matchMultiLineCommentStartToken,
self.matchVariablePlaceholderStart,
self.matchExpressionPlaceholderStart,
self.matchDirective,
self.matchPSPStartToken,
self.matchEOLSlurpToken,
]
## regex setup ##
def _makeCheetahVarREs(self):
"""Setup the regexs for Cheetah $var parsing."""
num = r'[0-9\.]+'
interval = (r'(?P<interval>' +
num + r's|' +
num + r'm|' +
num + r'h|' +
num + r'd|' +
num + r'w|' +
num + ')'
)
cacheToken = (r'(?:' +
r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+
'|' +
r'(?P<STATIC_CACHE>\*)' +
'|' +
r'(?P<NO_CACHE>)' +
')')
self.cacheTokenRE = re.compile(cacheToken)
silentPlaceholderToken = (r'(?:' +
r'(?P<SILENT>' +escapeRegexChars('!')+')'+
'|' +
r'(?P<NOT_SILENT>)' +
')')
self.silentPlaceholderTokenRE = re.compile(silentPlaceholderToken)
self.cheetahVarStartRE = re.compile(
escCharLookBehind +
r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+
r'(?P<silenceToken>'+silentPlaceholderToken+')'+
r'(?P<cacheToken>'+cacheToken+')'+
r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' + # allow WS after enclosure
r'(?=[A-Za-z_])')
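        # For illustration (not in the original source): with default settings
        # this start RE matches placeholder openings such as $var, $!var
        # (silent), $*var and $*5m*var (cached), and ${var} / $(var) / $[var]
        # (enclosed long forms).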
validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])'
self.cheetahVarStartToken = self.setting('cheetahVarStartToken')
self.cheetahVarStartTokenRE = re.compile(
escCharLookBehind +
escapeRegexChars(self.setting('cheetahVarStartToken'))
+validCharsLookAhead
)
self.cheetahVarInExpressionStartTokenRE = re.compile(
escapeRegexChars(self.setting('cheetahVarStartToken'))
+r'(?=[A-Za-z_])'
)
self.expressionPlaceholderStartRE = re.compile(
escCharLookBehind +
r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' +
r'(?P<cacheToken>' + cacheToken + ')' +
#r'\[[ \t\f]*'
r'(?:\{|\(|\[)[ \t\f]*'
+ r'(?=[^\)\}\]])'
)
if self.setting('EOLSlurpToken'):
self.EOLSlurpRE = re.compile(
escapeRegexChars(self.setting('EOLSlurpToken'))
+ r'[ \t\f]*'
+ r'(?:'+EOL+')'
)
else:
self.EOLSlurpRE = None
def _makeCommentREs(self):
"""Construct the regex bits that are used in comment parsing."""
startTokenEsc = escapeRegexChars(self.setting('commentStartToken'))
self.commentStartTokenRE = re.compile(escCharLookBehind + startTokenEsc)
del startTokenEsc
startTokenEsc = escapeRegexChars(
self.setting('multiLineCommentStartToken'))
endTokenEsc = escapeRegexChars(
self.setting('multiLineCommentEndToken'))
self.multiLineCommentTokenStartRE = re.compile(escCharLookBehind +
startTokenEsc)
self.multiLineCommentEndTokenRE = re.compile(escCharLookBehind +
endTokenEsc)
def _makeDirectiveREs(self):
"""Construct the regexs that are used in directive parsing."""
startToken = self.setting('directiveStartToken')
endToken = self.setting('directiveEndToken')
startTokenEsc = escapeRegexChars(startToken)
endTokenEsc = escapeRegexChars(endToken)
validSecondCharsLookAhead = r'(?=[A-Za-z_@])'
reParts = [escCharLookBehind, startTokenEsc]
if self.setting('allowWhitespaceAfterDirectiveStartToken'):
reParts.append('[ \t]*')
reParts.append(validSecondCharsLookAhead)
self.directiveStartTokenRE = re.compile(''.join(reParts))
self.directiveEndTokenRE = re.compile(escCharLookBehind + endTokenEsc)
def _makePspREs(self):
"""Setup the regexs for PSP parsing."""
startToken = self.setting('PSPStartToken')
startTokenEsc = escapeRegexChars(startToken)
self.PSPStartTokenRE = re.compile(escCharLookBehind + startTokenEsc)
endToken = self.setting('PSPEndToken')
endTokenEsc = escapeRegexChars(endToken)
self.PSPEndTokenRE = re.compile(escCharLookBehind + endTokenEsc)
def isLineClearToStartToken(self, pos=None):
return self.isLineClearToPos(pos)
def matchTopLevelToken(self):
"""Returns the first match found from the following methods:
self.matchCommentStartToken
self.matchMultiLineCommentStartToken
self.matchVariablePlaceholderStart
self.matchExpressionPlaceholderStart
self.matchDirective
self.matchPSPStartToken
self.matchEOLSlurpToken
Returns None if no match.
"""
match = None
if self.peek() in self._possibleNonStrConstantChars:
for matcher in self._nonStrConstMatchers:
match = matcher()
if match:
break
return match
def matchPyToken(self):
match = pseudoprog.match(self.src(), self.pos())
if match and match.group() in tripleQuotedStringStarts:
TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos())
if TQSmatch:
return TQSmatch
return match
def getPyToken(self):
match = self.matchPyToken()
if match is None:
raise ParseError(self)
elif match.group() in tripleQuotedStringStarts:
raise ParseError(self, msg='Malformed triple-quoted string')
return self.readTo(match.end())
def matchEOLSlurpToken(self):
if self.EOLSlurpRE:
return self.EOLSlurpRE.match(self.src(), self.pos())
def getEOLSlurpToken(self):
match = self.matchEOLSlurpToken()
if not match:
raise ParseError(self, msg='Invalid EOL slurp token')
return self.readTo(match.end())
def matchCommentStartToken(self):
return self.commentStartTokenRE.match(self.src(), self.pos())
def getCommentStartToken(self):
match = self.matchCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid single-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentStartToken(self):
return self.multiLineCommentTokenStartRE.match(self.src(), self.pos())
def getMultiLineCommentStartToken(self):
match = self.matchMultiLineCommentStartToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment start token')
return self.readTo(match.end())
def matchMultiLineCommentEndToken(self):
return self.multiLineCommentEndTokenRE.match(self.src(), self.pos())
def getMultiLineCommentEndToken(self):
match = self.matchMultiLineCommentEndToken()
if not match:
raise ParseError(self, msg='Invalid multi-line comment end token')
return self.readTo(match.end())
def getDottedName(self):
srcLen = len(self)
nameChunks = []
if not self.peek() in identchars:
raise ParseError(self)
while self.pos() < srcLen:
c = self.peek()
if c in namechars:
nameChunk = self.getIdentifier()
nameChunks.append(nameChunk)
elif c == '.':
if self.pos()+1 <srcLen and self.peek(1) in identchars:
nameChunks.append(self.getc())
else:
break
else:
break
return ''.join(nameChunks)
def matchIdentifier(self):
return identRE.match(self.src(), self.pos())
def getIdentifier(self):
match = self.matchIdentifier()
if not match:
raise ParseError(self, msg='Invalid identifier')
return self.readTo(match.end())
def matchOperator(self):
match = self.matchPyToken()
if match and match.group() not in operators:
match = None
return match
def getOperator(self):
match = self.matchOperator()
if not match:
raise ParseError(self, msg='Expected operator')
return self.readTo( match.end() )
def matchAssignmentOperator(self):
match = self.matchPyToken()
if match and match.group() not in assignmentOps:
match = None
return match
def getAssignmentOperator(self):
match = self.matchAssignmentOperator()
if not match:
raise ParseError(self, msg='Expected assignment operator')
return self.readTo( match.end() )
def matchDirective(self):
"""Returns False or the name of the directive matched.
"""
startPos = self.pos()
if not self.matchDirectiveStartToken():
return False
self.getDirectiveStartToken()
directiveName = self.matchDirectiveName()
self.setPos(startPos)
return directiveName
def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'):
startPos = self.pos()
directives = self._directiveNamesAndParsers.keys()
possibleMatches = []
name = ''
while not self.atEnd():
c = self.getc()
if not c in directiveNameChars:
break
name += c
if name in directives:
possibleMatches.append(name)
possibleMatches.sort()
possibleMatches.reverse() # longest match first
directiveName = False
if possibleMatches:
directiveName = possibleMatches[0]
self.setPos(startPos)
return directiveName
def matchDirectiveStartToken(self):
return self.directiveStartTokenRE.match(self.src(), self.pos())
def getDirectiveStartToken(self):
match = self.matchDirectiveStartToken()
if not match:
raise ParseError(self, msg='Invalid directive start token')
return self.readTo(match.end())
def matchDirectiveEndToken(self):
return self.directiveEndTokenRE.match(self.src(), self.pos())
def getDirectiveEndToken(self):
match = self.matchDirectiveEndToken()
if not match:
raise ParseError(self, msg='Invalid directive end token')
return self.readTo(match.end())
def matchColonForSingleLineShortFormDirective(self):
if not self.atEnd() and self.peek()==':':
restOfLine = self[self.pos()+1:self.findEOL()]
restOfLine = restOfLine.strip()
if not restOfLine:
return False
elif self.commentStartTokenRE.match(restOfLine):
return False
            else:  # non-whitespace, non-comment chars found
return True
return False
def matchPSPStartToken(self):
return self.PSPStartTokenRE.match(self.src(), self.pos())
def matchPSPEndToken(self):
return self.PSPEndTokenRE.match(self.src(), self.pos())
def getPSPStartToken(self):
match = self.matchPSPStartToken()
if not match:
raise ParseError(self, msg='Invalid psp start token')
return self.readTo(match.end())
def getPSPEndToken(self):
match = self.matchPSPEndToken()
if not match:
raise ParseError(self, msg='Invalid psp end token')
return self.readTo(match.end())
def matchCheetahVarStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchCheetahVarStartToken(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartTokenRE.match(self.src(), self.pos())
def matchCheetahVarInExpressionStartToken(self):
"""no enclosures or cache tokens allowed"""
return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos())
def matchVariablePlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.cheetahVarStartRE.match(self.src(), self.pos())
def matchExpressionPlaceholderStart(self):
"""includes the enclosure and cache token"""
return self.expressionPlaceholderStartRE.match(self.src(), self.pos())
def getCheetahVarStartToken(self):
"""just the start token, not the enclosure or cache token"""
match = self.matchCheetahVarStartToken()
if not match:
raise ParseError(self, msg='Expected Cheetah $var start token')
return self.readTo( match.end() )
def getCacheToken(self):
try:
token = self.cacheTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected cache token')
def getSilentPlaceholderToken(self):
try:
token = self.silentPlaceholderTokenRE.match(self.src(), self.pos())
self.setPos( token.end() )
return token.group()
except:
raise ParseError(self, msg='Expected silent placeholder token')
def getTargetVarsList(self):
varnames = []
while not self.atEnd():
if self.peek() in ' \t\f':
self.getWhiteSpace()
elif self.peek() in '\r\n':
break
elif self.startswith(','):
self.advance()
elif self.startswith('in ') or self.startswith('in\t'):
break
#elif self.matchCheetahVarStart():
elif self.matchCheetahVarInExpressionStartToken():
self.getCheetahVarStartToken()
self.getSilentPlaceholderToken()
self.getCacheToken()
varnames.append( self.getDottedName() )
elif self.matchIdentifier():
varnames.append( self.getDottedName() )
else:
break
return varnames
def getCheetahVar(self, plain=False, skipStartToken=False):
"""This is called when parsing inside expressions. Cache tokens are only
valid in placeholders so this method discards any cache tokens found.
"""
if not skipStartToken:
self.getCheetahVarStartToken()
self.getSilentPlaceholderToken()
self.getCacheToken()
return self.getCheetahVarBody(plain=plain)
def getCheetahVarBody(self, plain=False):
# @@TR: this should be in the compiler
return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain)
def getCheetahVarNameChunks(self):
"""
nameChunks = list of Cheetah $var subcomponents represented as tuples
[ (namemapperPart,autoCall,restOfName),
]
where:
namemapperPart = the dottedName base
        autocall = whether NameMapper should use autocalling on namemapperPart
restOfName = any arglist, index, or slice
If restOfName contains a call arglist (e.g. '(1234)') then autocall is
False, otherwise it defaults to True.
EXAMPLE
------------------------------------------------------------------------
if the raw CheetahVar is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'),
('d',False,'()'),
('x.y.z',True,''),
]
"""
chunks = []
while self.pos() < len(self):
rest = ''
autoCall = True
if not self.peek() in identchars + '.':
break
elif self.peek() == '.':
if self.pos()+1 < len(self) and self.peek(1) in identchars:
self.advance() # discard the period as it isn't needed with NameMapper
else:
break
dottedName = self.getDottedName()
if not self.atEnd() and self.peek() in '([':
if self.peek() == '(':
rest = self.getCallArgString()
else:
rest = self.getExpression(enclosed=True)
period = max(dottedName.rfind('.'), 0)
if period:
chunks.append( (dottedName[:period], autoCall, '') )
dottedName = dottedName[period+1:]
if rest and rest[0]=='(':
autoCall = False
chunks.append( (dottedName, autoCall, rest) )
return chunks
def getCallArgString(self,
enclosures=[], # list of tuples (char, pos), where char is ({ or [
useNameMapper=Unspecified):
""" Get a method/function call argument string.
This method understands *arg, and **kw
"""
# @@TR: this settings mangling should be removed
if useNameMapper is not Unspecified:
useNameMapper_orig = self.setting('useNameMapper')
self.setSetting('useNameMapper', useNameMapper)
if enclosures:
pass
else:
if not self.peek() == '(':
raise ParseError(self, msg="Expected '('")
startPos = self.pos()
self.getc()
enclosures = [('(', startPos),
]
argStringBits = ['(']
addBit = argStringBits.append
while 1:
if self.atEnd():
open = enclosures[-1][0]
close = closurePairsRev[open]
self.setPos(enclosures[-1][1])
raise ParseError(
self, msg="EOF was reached before a matching '" + close +
"' was found for the '" + open + "'")
c = self.peek()
if c in ")}]": # get the ending enclosure and break
if not enclosures:
raise ParseError(self)
c = self.getc()
open = closurePairs[c]
if enclosures[-1][0] == open:
enclosures.pop()
addBit(')')
break
else:
raise ParseError(self)
elif c in " \t\f\r\n":
addBit(self.getc())
elif self.matchCheetahVarInExpressionStartToken():
startPos = self.pos()
codeFor1stToken = self.getCheetahVar()
WS = self.getWhiteSpace()
if not self.atEnd() and self.peek() == '=':
nextToken = self.getPyToken()
if nextToken == '=':
endPos = self.pos()
self.setPos(startPos)
codeFor1stToken = self.getCheetahVar(plain=True)
self.setPos(endPos)
## finally
addBit( codeFor1stToken + WS + nextToken )
else:
addBit( codeFor1stToken + WS)
elif self.matchCheetahVarStart():
# it has syntax that is only valid at the top level
self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
else:
beforeTokenPos = self.pos()
token = self.getPyToken()
if token in ('{','(','['):
self.rev()
token = self.getExpression(enclosed=True)
token = self.transformToken(token, beforeTokenPos)
addBit(token)
if useNameMapper is not Unspecified:
self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
return ''.join(argStringBits)
def getDefArgList(self, exitPos=None, useNameMapper=False):
""" Get an argument list. Can be used for method/function definition
argument lists or for #directive argument lists. Returns a list of
tuples in the form (argName, defVal=None) with one tuple for each arg
name.
These defVals are always strings, so (argName, defVal=None) is safe even
with a case like (arg1, arg2=None, arg3=1234*2), which would be returned as
[('arg1', None),
('arg2', 'None'),
('arg3', '1234*2'),
]
This method understands *arg, and **kw
"""
if self.peek() == '(':
self.advance()
else:
exitPos = self.findEOL() # it's a directive so break at the EOL
argList = ArgList()
onDefVal = False
# @@TR: this settings mangling should be removed
useNameMapper_orig = self.setting('useNameMapper')
self.setSetting('useNameMapper', useNameMapper)
while 1:
            if self.atEnd():
                raise ParseError(
                    self, msg="EOF was reached before a matching ')'" +
                    " was found for the '('")
if self.pos() == exitPos:
break
c = self.peek()
if c == ")" or self.matchDirectiveEndToken():
break
elif c == ":":
break
elif c in " \t\f\r\n":
if onDefVal:
argList.addToDefVal(c)
self.advance()
elif c == '=':
onDefVal = True
self.advance()
elif c == ",":
argList.next()
onDefVal = False
self.advance()
elif self.startswith(self.cheetahVarStartToken) and not onDefVal:
self.advance(len(self.cheetahVarStartToken))
elif self.matchIdentifier() and not onDefVal:
argList.addArgName( self.getIdentifier() )
elif onDefVal:
if self.matchCheetahVarInExpressionStartToken():
token = self.getCheetahVar()
elif self.matchCheetahVarStart():
# it has syntax that is only valid at the top level
self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
else:
beforeTokenPos = self.pos()
token = self.getPyToken()
if token in ('{','(','['):
self.rev()
token = self.getExpression(enclosed=True)
token = self.transformToken(token, beforeTokenPos)
argList.addToDefVal(token)
elif c == '*' and not onDefVal:
varName = self.getc()
if self.peek() == '*':
varName += self.getc()
if not self.matchIdentifier():
raise ParseError(self)
varName += self.getIdentifier()
argList.addArgName(varName)
else:
raise ParseError(self)
self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
return argList.merge()
def getExpressionParts(self,
enclosed=False,
enclosures=None, # list of tuples (char, pos), where char is ({ or [
pyTokensToBreakAt=None, # only works if not enclosed
useNameMapper=Unspecified,
):
""" Get a Cheetah expression that includes $CheetahVars and break at
directive end tokens, the end of an enclosure, or at a specified
pyToken.
"""
if useNameMapper is not Unspecified:
useNameMapper_orig = self.setting('useNameMapper')
self.setSetting('useNameMapper', useNameMapper)
if enclosures is None:
enclosures = []
srcLen = len(self)
exprBits = []
while 1:
if self.atEnd():
if enclosures:
open = enclosures[-1][0]
close = closurePairsRev[open]
self.setPos(enclosures[-1][1])
raise ParseError(
self, msg="EOF was reached before a matching '" + close +
"' was found for the '" + open + "'")
else:
break
c = self.peek()
if c in "{([":
exprBits.append(c)
enclosures.append( (c, self.pos()) )
self.advance()
elif enclosed and not enclosures:
break
elif c in "])}":
if not enclosures:
raise ParseError(self)
open = closurePairs[c]
if enclosures[-1][0] == open:
enclosures.pop()
exprBits.append(c)
else:
open = enclosures[-1][0]
close = closurePairsRev[open]
row, col = self.getRowCol()
self.setPos(enclosures[-1][1])
raise ParseError(
self, msg= "A '" + c + "' was found at line " + str(row) +
", col " + str(col) +
" before a matching '" + close +
"' was found\nfor the '" + open + "'")
self.advance()
elif c in " \f\t":
exprBits.append(self.getWhiteSpace())
elif self.matchDirectiveEndToken() and not enclosures:
break
elif c == "\\" and self.pos()+1 < srcLen:
eolMatch = EOLre.match(self.src(), self.pos()+1)
if not eolMatch:
self.advance()
raise ParseError(self, msg='Line ending expected')
self.setPos( eolMatch.end() )
elif c in '\r\n':
if enclosures:
self.advance()
else:
break
elif self.matchCheetahVarInExpressionStartToken():
expr = self.getCheetahVar()
exprBits.append(expr)
elif self.matchCheetahVarStart():
# it has syntax that is only valid at the top level
self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr()
else:
beforeTokenPos = self.pos()
token = self.getPyToken()
if (not enclosures
and pyTokensToBreakAt
and token in pyTokensToBreakAt):
self.setPos(beforeTokenPos)
break
token = self.transformToken(token, beforeTokenPos)
exprBits.append(token)
if identRE.match(token):
if token == 'for':
expr = self.getExpression(useNameMapper=False, pyTokensToBreakAt=['in'])
exprBits.append(expr)
else:
exprBits.append(self.getWhiteSpace())
if not self.atEnd() and self.peek() == '(':
exprBits.append(self.getCallArgString())
##
if useNameMapper is not Unspecified:
self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above
return exprBits
def getExpression(self,
enclosed=False,
enclosures=None, # list of tuples (char, pos), where # char is ({ or [
pyTokensToBreakAt=None,
useNameMapper=Unspecified,
):
"""Returns the output of self.getExpressionParts() as a concatenated
string rather than as a list.
"""
return ''.join(self.getExpressionParts(
enclosed=enclosed, enclosures=enclosures,
pyTokensToBreakAt=pyTokensToBreakAt,
useNameMapper=useNameMapper))
def transformToken(self, token, beforeTokenPos):
"""Takes a token from the expression being parsed and performs and
special transformations required by Cheetah.
At the moment only Cheetah's c'$placeholder strings' are transformed.
"""
if token=='c' and not self.atEnd() and self.peek() in '\'"':
nextToken = self.getPyToken()
token = nextToken.upper()
theStr = eval(token)
endPos = self.pos()
            if not theStr:
                return "''"  # empty c-string: emit an empty-string literal
if token.startswith(single3) or token.startswith(double3):
startPosIdx = 3
else:
startPosIdx = 1
#print 'CHEETAH STRING', nextToken, theStr, startPosIdx
self.setPos(beforeTokenPos+startPosIdx+1)
outputExprs = []
strConst = ''
while self.pos() < (endPos-startPosIdx):
if self.matchCheetahVarStart() or self.matchExpressionPlaceholderStart():
if strConst:
outputExprs.append(repr(strConst))
strConst = ''
placeholderExpr = self.getPlaceholder()
outputExprs.append('str('+placeholderExpr+')')
else:
strConst += self.getc()
self.setPos(endPos)
if strConst:
outputExprs.append(repr(strConst))
#if not self.atEnd() and self.matches('.join('):
# print 'DEBUG***'
token = "''.join(["+','.join(outputExprs)+"])"
return token
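    # For illustration (not in the original source): transformToken rewrites a
    # Cheetah string such as
    #     c'Hello $name!'
    # into an expression of the form
    #     ''.join(['Hello ', str(<compiled $name placeholder>), '!'])
    # so placeholders inside the string are evaluated at runtime.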
def _raiseErrorAboutInvalidCheetahVarSyntaxInExpr(self):
match = self.matchCheetahVarStart()
groupdict = match.groupdict()
if groupdict.get('cacheToken'):
raise ParseError(
self,
msg='Cache tokens are not valid inside expressions. '
'Use them in top-level $placeholders only.')
elif groupdict.get('enclosure'):
raise ParseError(
self,
msg='Long-form placeholders - ${}, $(), $[], etc. are not valid inside expressions. '
'Use them in top-level $placeholders only.')
else:
raise ParseError(
self,
msg='This form of $placeholder syntax is not valid here.')
def getPlaceholder(self, allowCacheTokens=False, plain=False, returnEverything=False):
# filtered
for callback in self.setting('preparsePlaceholderHooks'):
callback(parser=self)
startPos = self.pos()
lineCol = self.getRowCol(startPos)
startToken = self.getCheetahVarStartToken()
silentPlaceholderToken = self.getSilentPlaceholderToken()
if silentPlaceholderToken:
isSilentPlaceholder = True
else:
isSilentPlaceholder = False
if allowCacheTokens:
cacheToken = self.getCacheToken()
cacheTokenParts = self.cacheTokenRE.match(cacheToken).groupdict()
else:
cacheTokenParts = {}
if self.peek() in '({[':
pos = self.pos()
enclosureOpenChar = self.getc()
enclosures = [ (enclosureOpenChar, pos) ]
self.getWhiteSpace()
else:
enclosures = []
filterArgs = None
if self.matchIdentifier():
nameChunks = self.getCheetahVarNameChunks()
expr = self._compiler.genCheetahVar(nameChunks[:], plain=plain)
restOfExpr = None
if enclosures:
WS = self.getWhiteSpace()
expr += WS
if self.setting('allowPlaceholderFilterArgs') and self.peek()==',':
filterArgs = self.getCallArgString(enclosures=enclosures)[1:-1]
else:
if self.peek()==closurePairsRev[enclosureOpenChar]:
self.getc()
else:
restOfExpr = self.getExpression(enclosed=True, enclosures=enclosures)
if restOfExpr[-1] == closurePairsRev[enclosureOpenChar]:
restOfExpr = restOfExpr[:-1]
expr += restOfExpr
rawPlaceholder = self[startPos: self.pos()]
else:
expr = self.getExpression(enclosed=True, enclosures=enclosures)
if expr[-1] == closurePairsRev[enclosureOpenChar]:
expr = expr[:-1]
rawPlaceholder=self[startPos: self.pos()]
expr = self._applyExpressionFilters(expr,'placeholder',
rawExpr=rawPlaceholder,startPos=startPos)
for callback in self.setting('postparsePlaceholderHooks'):
callback(parser=self)
if returnEverything:
return (expr, rawPlaceholder, lineCol, cacheTokenParts,
filterArgs, isSilentPlaceholder)
else:
return expr
class _HighLevelParser(_LowLevelParser):
"""This class is a StateMachine for parsing Cheetah source and
sending state dependent code generation commands to
Cheetah.Compiler.Compiler.
"""
def __init__(self, src, filename=None, breakPoint=None, compiler=None):
_LowLevelParser.__init__(self, src, filename=filename, breakPoint=breakPoint)
self.setSettingsManager(compiler)
self._compiler = compiler
self.setupState()
self.configureParser()
def setupState(self):
self._macros = {}
self._macroDetails = {}
self._openDirectivesStack = []
def cleanup(self):
"""Cleanup to remove any possible reference cycles
"""
self._macros.clear()
for macroname, macroDetails in self._macroDetails.items():
macroDetails.template.shutdown()
del macroDetails.template
self._macroDetails.clear()
def configureParser(self):
_LowLevelParser.configureParser(self)
self._initDirectives()
def _initDirectives(self):
def normalizeParserVal(val):
if isinstance(val, (str,unicode)):
handler = getattr(self, val)
elif type(val) in (ClassType, TypeType):
handler = val(self)
elif callable(val):
handler = val
elif val is None:
handler = val
else:
raise Exception('Invalid parser/handler value %r for %s'%(val, name))
return handler
normalizeHandlerVal = normalizeParserVal
_directiveNamesAndParsers = directiveNamesAndParsers.copy()
customNamesAndParsers = self.setting('directiveNamesAndParsers',{})
_directiveNamesAndParsers.update(customNamesAndParsers)
_endDirectiveNamesAndHandlers = endDirectiveNamesAndHandlers.copy()
customNamesAndHandlers = self.setting('endDirectiveNamesAndHandlers',{})
_endDirectiveNamesAndHandlers.update(customNamesAndHandlers)
self._directiveNamesAndParsers = {}
for name, val in _directiveNamesAndParsers.items():
if val in (False, 0):
continue
self._directiveNamesAndParsers[name] = normalizeParserVal(val)
self._endDirectiveNamesAndHandlers = {}
for name, val in _endDirectiveNamesAndHandlers.items():
if val in (False, 0):
continue
self._endDirectiveNamesAndHandlers[name] = normalizeHandlerVal(val)
self._closeableDirectives = ['def','block','closure','defmacro',
'call',
'capture',
'cache',
'filter',
'if','unless',
'for','while','repeat',
'try',
]
for directiveName in self.setting('closeableDirectives',[]):
self._closeableDirectives.append(directiveName)
macroDirectives = self.setting('macroDirectives',{})
from Cheetah.Macros.I18n import I18n
macroDirectives['i18n'] = I18n
for macroName, callback in macroDirectives.items():
if type(callback) in (ClassType, TypeType):
callback = callback(parser=self)
assert callback
self._macros[macroName] = callback
self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def _applyExpressionFilters(self, expr, exprType, rawExpr=None, startPos=None):
"""Pipes cheetah expressions through a set of optional filter hooks.
The filters are functions which may modify the expressions or raise
a ForbiddenExpression exception if the expression is not allowed. They
are defined in the compiler setting 'expressionFilterHooks'.
Some intended use cases:
- to implement 'restricted execution' safeguards in cases where you
can't trust the author of the template.
- to enforce style guidelines
filter call signature: (parser, expr, exprType, rawExpr=None, startPos=None)
- parser is the Cheetah parser
- expr is the expression to filter. In some cases the parser will have
already modified it from the original source code form. For example,
placeholders will have been translated into namemapper calls. If you
need to work with the original source, see rawExpr.
- exprType is the name of the directive, 'psp', or 'placeholder'. All
lowercase. @@TR: These will eventually be replaced with a set of
constants.
- rawExpr is the original source string that Cheetah parsed. This
might be None in some cases.
- startPos is the character position in the source string/file
where the parser started parsing the current expression.
@@TR: I realize this use of the term 'expression' is a bit wonky as many
of the 'expressions' are actually statements, but I haven't thought of
a better name yet. Suggestions?
"""
for callback in self.setting('expressionFilterHooks'):
expr = callback(parser=self, expr=expr, exprType=exprType,
rawExpr=rawExpr, startPos=startPos)
return expr
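    # For illustration (not in the original source): a hook with the signature
    # documented above that rejects expressions mentioning 'import' might look
    # like this (hypothetical), enabled via the 'expressionFilterHooks'
    # compiler setting:
    #
    #     def rejectImports(parser, expr, exprType, rawExpr=None, startPos=None):
    #         if 'import' in expr:
    #             raise ForbiddenExpression(parser, msg='imports are disabled')
    #         return expr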
def _filterDisabledDirectives(self, directiveName):
directiveName = directiveName.lower()
if (directiveName in self.setting('disabledDirectives')
or (self.setting('enabledDirectives')
and directiveName not in self.setting('enabledDirectives'))):
for callback in self.setting('disabledDirectiveHooks'):
callback(parser=self, directiveName=directiveName)
raise ForbiddenDirective(self, msg='This %r directive is disabled'%directiveName)
## main parse loop
def parse(self, breakPoint=None, assertEmptyStack=True):
if breakPoint:
origBP = self.breakPoint()
self.setBreakPoint(breakPoint)
assertEmptyStack = False
while not self.atEnd():
if self.matchCommentStartToken():
self.eatComment()
elif self.matchMultiLineCommentStartToken():
self.eatMultiLineComment()
elif self.matchVariablePlaceholderStart():
self.eatPlaceholder()
elif self.matchExpressionPlaceholderStart():
self.eatPlaceholder()
elif self.matchDirective():
self.eatDirective()
elif self.matchPSPStartToken():
self.eatPSP()
elif self.matchEOLSlurpToken():
self.eatEOLSlurpToken()
else:
self.eatPlainText()
if assertEmptyStack:
self.assertEmptyOpenDirectivesStack()
if breakPoint:
self.setBreakPoint(origBP)
## non-directive eat methods
def eatPlainText(self):
startPos = self.pos()
match = None
while not self.atEnd():
match = self.matchTopLevelToken()
if match:
break
else:
self.advance()
strConst = self.readTo(self.pos(), start=startPos)
self._compiler.addStrConst(strConst)
return match
def eatComment(self):
isLineClearToStartToken = self.isLineClearToStartToken()
if isLineClearToStartToken:
self._compiler.handleWSBeforeDirective()
self.getCommentStartToken()
comm = self.readToEOL(gobble=isLineClearToStartToken)
self._compiler.addComment(comm)
def eatMultiLineComment(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getMultiLineCommentStartToken()
endPos = startPos = self.pos()
level = 1
while 1:
endPos = self.pos()
if self.atEnd():
break
if self.matchMultiLineCommentStartToken():
self.getMultiLineCommentStartToken()
level += 1
elif self.matchMultiLineCommentEndToken():
self.getMultiLineCommentEndToken()
level -= 1
if not level:
break
self.advance()
comm = self.readTo(endPos, start=startPos)
if not self.atEnd():
self.getMultiLineCommentEndToken()
if (not self.atEnd()) and self.setting('gobbleWhitespaceAroundMultiLineComments'):
restOfLine = self[self.pos():self.findEOL()]
if not restOfLine.strip(): # WS only to EOL
self.readToEOL(gobble=isLineClearToStartToken)
if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLine):
self._compiler.handleWSBeforeDirective()
self._compiler.addComment(comm)
def eatPlaceholder(self):
(expr, rawPlaceholder,
lineCol, cacheTokenParts,
filterArgs, isSilentPlaceholder) = self.getPlaceholder(
allowCacheTokens=True, returnEverything=True)
self._compiler.addPlaceholder(
expr,
filterArgs=filterArgs,
rawPlaceholder=rawPlaceholder,
cacheTokenParts=cacheTokenParts,
lineCol=lineCol,
silentMode=isSilentPlaceholder)
return
def eatPSP(self):
# filtered
self._filterDisabledDirectives(directiveName='psp')
self.getPSPStartToken()
endToken = self.setting('PSPEndToken')
startPos = self.pos()
while not self.atEnd():
if self.peek() == endToken[0]:
if self.matchPSPEndToken():
break
self.advance()
pspString = self.readTo(self.pos(), start=startPos).strip()
pspString = self._applyExpressionFilters(pspString, 'psp', startPos=startPos)
self._compiler.addPSP(pspString)
self.getPSPEndToken()
## generic directive eat methods
_simpleIndentingDirectives = '''
else elif for while repeat unless try except finally'''.split()
_simpleExprDirectives = '''
pass continue stop return yield break
del assert raise
silent echo
import from'''.split()
_directiveHandlerNames = {'import':'addImportStatement',
'from':'addImportStatement', }
def eatDirective(self):
directiveName = self.matchDirective()
self._filterDisabledDirectives(directiveName)
for callback in self.setting('preparseDirectiveHooks'):
callback(parser=self, directiveName=directiveName)
# subclasses can override the default behaviours here by providing an
# eater method in self._directiveNamesAndParsers[directiveName]
directiveParser = self._directiveNamesAndParsers.get(directiveName)
if directiveParser:
directiveParser()
elif directiveName in self._simpleIndentingDirectives:
handlerName = self._directiveHandlerNames.get(directiveName)
if not handlerName:
handlerName = 'add'+directiveName.capitalize()
handler = getattr(self._compiler, handlerName)
self.eatSimpleIndentingDirective(directiveName, callback=handler)
elif directiveName in self._simpleExprDirectives:
handlerName = self._directiveHandlerNames.get(directiveName)
if not handlerName:
handlerName = 'add'+directiveName.capitalize()
handler = getattr(self._compiler, handlerName)
if directiveName in ('silent', 'echo'):
includeDirectiveNameInExpr = False
else:
includeDirectiveNameInExpr = True
expr = self.eatSimpleExprDirective(
directiveName,
includeDirectiveNameInExpr=includeDirectiveNameInExpr)
handler(expr)
##
for callback in self.setting('postparseDirectiveHooks'):
callback(parser=self, directiveName=directiveName)
def _eatRestOfDirectiveTag(self, isLineClearToStartToken, endOfFirstLinePos):
foundComment = False
if self.matchCommentStartToken():
pos = self.pos()
self.advance()
if not self.matchDirective():
self.setPos(pos)
foundComment = True
self.eatComment() # this won't gobble the EOL
else:
self.setPos(pos)
if not foundComment and self.matchDirectiveEndToken():
self.getDirectiveEndToken()
elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
# still gobble the EOL if a comment was found.
self.readToEOL(gobble=True)
if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLinePos):
self._compiler.handleWSBeforeDirective()
def _eatToThisEndDirective(self, directiveName):
finalPos = endRawPos = startPos = self.pos()
directiveChar = self.setting('directiveStartToken')[0]
isLineClearToStartToken = False
while not self.atEnd():
if self.peek() == directiveChar:
if self.matchDirective() == 'end':
endRawPos = self.pos()
self.getDirectiveStartToken()
self.advance(len('end'))
self.getWhiteSpace()
if self.startswith(directiveName):
if self.isLineClearToStartToken(endRawPos):
isLineClearToStartToken = True
endRawPos = self.findBOL(endRawPos)
self.advance(len(directiveName)) # to end of directiveName
self.getWhiteSpace()
finalPos = self.pos()
break
self.advance()
finalPos = endRawPos = self.pos()
textEaten = self.readTo(endRawPos, start=startPos)
self.setPos(finalPos)
endOfFirstLinePos = self.findEOL()
if self.matchDirectiveEndToken():
self.getDirectiveEndToken()
elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
self.readToEOL(gobble=True)
if isLineClearToStartToken and self.pos() > endOfFirstLinePos:
self._compiler.handleWSBeforeDirective()
return textEaten
def eatSimpleExprDirective(self, directiveName, includeDirectiveNameInExpr=True):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
if not includeDirectiveNameInExpr:
self.advance(len(directiveName))
startPos = self.pos()
expr = self.getExpression().strip()
directiveName = expr.split()[0]
expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
if directiveName in self._closeableDirectives:
self.pushToOpenDirectivesStack(directiveName)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
return expr
def eatSimpleIndentingDirective(self, directiveName, callback,
includeDirectiveNameInExpr=False):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
if directiveName not in 'else elif for while try except finally'.split():
self.advance(len(directiveName))
startPos = self.pos()
self.getWhiteSpace()
expr = self.getExpression(pyTokensToBreakAt=[':'])
expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
if directiveName in 'else elif except finally'.split():
callback(expr, dedent=False, lineCol=lineCol)
else:
callback(expr, lineCol=lineCol)
self.getWhiteSpace(max=1)
self.parse(breakPoint=self.findEOL(gobble=True))
self._compiler.commitStrConst()
self._compiler.dedent()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
if directiveName in self._closeableDirectives:
self.pushToOpenDirectivesStack(directiveName)
callback(expr, lineCol=lineCol)
def eatEndDirective(self):
isLineClearToStartToken = self.isLineClearToStartToken()
self.getDirectiveStartToken()
self.advance(3) # to end of 'end'
self.getWhiteSpace()
pos = self.pos()
directiveName = False
for key in self._endDirectiveNamesAndHandlers.keys():
if self.find(key, pos) == pos:
directiveName = key
break
if not directiveName:
raise ParseError(self, msg='Invalid end directive')
endOfFirstLinePos = self.findEOL()
self.getExpression() # eat in any extra comment-like crap
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
if directiveName in self._closeableDirectives:
self.popFromOpenDirectivesStack(directiveName)
# subclasses can override the default behaviours here by providing an
# end-directive handler in self._endDirectiveNamesAndHandlers[directiveName]
if self._endDirectiveNamesAndHandlers.get(directiveName):
handler = self._endDirectiveNamesAndHandlers[directiveName]
handler()
elif directiveName in 'block capture cache call filter errorCatcher'.split():
            if directiveName == 'block':
                self._compiler.closeBlock()
            elif directiveName == 'capture':
                self._compiler.endCaptureRegion()
            elif directiveName == 'cache':
                self._compiler.endCacheRegion()
            elif directiveName == 'call':
                self._compiler.endCallRegion()
            elif directiveName == 'filter':
                self._compiler.closeFilterBlock()
            elif directiveName == 'errorCatcher':
                self._compiler.turnErrorCatcherOff()
elif directiveName in 'while for if try repeat unless'.split():
self._compiler.commitStrConst()
self._compiler.dedent()
elif directiveName=='closure':
self._compiler.commitStrConst()
self._compiler.dedent()
# @@TR: temporary hack of useSearchList
self.setSetting('useSearchList', self._useSearchList_orig)
## specific directive eat methods
def eatBreakPoint(self):
"""Tells the parser to stop parsing at this point and completely ignore
everything else.
This is a debugging tool.
"""
self.setBreakPoint(self.pos())
def eatShbang(self):
# filtered
self.getDirectiveStartToken()
self.advance(len('shBang'))
self.getWhiteSpace()
startPos = self.pos()
shBang = self.readToEOL()
shBang = self._applyExpressionFilters(shBang, 'shbang', startPos=startPos)
self._compiler.setShBang(shBang.strip())
def eatEncoding(self):
# filtered
self.getDirectiveStartToken()
self.advance(len('encoding'))
self.getWhiteSpace()
startPos = self.pos()
encoding = self.readToEOL()
encoding = self._applyExpressionFilters(encoding, 'encoding', startPos=startPos)
self._compiler.setModuleEncoding(encoding.strip())
def eatCompiler(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
startPos = self.pos()
self.getDirectiveStartToken()
self.advance(len('compiler')) # to end of 'compiler'
self.getWhiteSpace()
startPos = self.pos()
settingName = self.getIdentifier()
if settingName.lower() == 'reset':
self.getExpression() # gobble whitespace & junk
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
self._initializeSettings()
self.configureParser()
return
self.getWhiteSpace()
if self.peek() == '=':
self.advance()
else:
            raise ParseError(self)
valueExpr = self.getExpression()
endPos = self.pos()
        # @@TR: it's unlikely that anyone applying filters would have left
        # this directive enabled:
# @@TR: fix up filtering, regardless
self._applyExpressionFilters('%s=%r'%(settingName, valueExpr),
'compiler', startPos=startPos)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
try:
self._compiler.setCompilerSetting(settingName, valueExpr)
except:
out = sys.stderr
print >> out, 'An error occurred while processing the following #compiler directive.'
print >> out, '-'*80
print >> out, self[startPos:endPos]
print >> out, '-'*80
print >> out, 'Please check the syntax of these settings.'
print >> out, 'A full Python exception traceback follows.'
raise
def eatCompilerSettings(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('compiler-settings')) # to end of 'settings'
keywords = self.getTargetVarsList()
self.getExpression() # gobble any garbage
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
if 'reset' in keywords:
self._compiler._initializeSettings()
self.configureParser()
# @@TR: this implies a single-line #compiler-settings directive, and
# thus we should parse forward for an end directive.
# Subject to change in the future
return
startPos = self.pos()
settingsStr = self._eatToThisEndDirective('compiler-settings')
settingsStr = self._applyExpressionFilters(settingsStr, 'compilerSettings',
startPos=startPos)
try:
self._compiler.setCompilerSettings(keywords=keywords, settingsStr=settingsStr)
except:
out = sys.stderr
print >> out, 'An error occurred while processing the following compiler settings.'
print >> out, '-'*80
print >> out, settingsStr.strip()
print >> out, '-'*80
print >> out, 'Please check the syntax of these settings.'
print >> out, 'A full Python exception traceback follows.'
raise
def eatAttr(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
startPos = self.pos()
self.getDirectiveStartToken()
self.advance(len('attr'))
self.getWhiteSpace()
startPos = self.pos()
if self.matchCheetahVarStart():
self.getCheetahVarStartToken()
attribName = self.getIdentifier()
self.getWhiteSpace()
self.getAssignmentOperator()
expr = self.getExpression()
expr = self._applyExpressionFilters(expr, 'attr', startPos=startPos)
self._compiler.addAttribute(attribName, expr)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
def eatDecorator(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
startPos = self.pos()
self.getDirectiveStartToken()
#self.advance() # eat @
startPos = self.pos()
decoratorExpr = self.getExpression()
decoratorExpr = self._applyExpressionFilters(decoratorExpr, 'decorator', startPos=startPos)
self._compiler.addDecorator(decoratorExpr)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self.getWhiteSpace()
directiveName = self.matchDirective()
if not directiveName or directiveName not in ('def', 'block', 'closure'):
raise ParseError(self, msg='Expected #def, #block or #closure')
self.eatDirective()
def eatDef(self):
# filtered
self._eatDefOrBlock('def')
def eatBlock(self):
# filtered
startPos = self.pos()
methodName, rawSignature = self._eatDefOrBlock('block')
self._compiler._blockMetaData[methodName] = {
'raw':rawSignature,
'lineCol':self.getRowCol(startPos),
}
def eatClosure(self):
# filtered
self._eatDefOrBlock('closure')
def _eatDefOrBlock(self, directiveName):
# filtered
assert directiveName in ('def','block','closure')
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
startPos = self.pos()
self.getDirectiveStartToken()
self.advance(len(directiveName))
self.getWhiteSpace()
if self.matchCheetahVarStart():
self.getCheetahVarStartToken()
methodName = self.getIdentifier()
self.getWhiteSpace()
if self.peek() == '(':
argsList = self.getDefArgList()
self.advance() # past the closing ')'
if argsList and argsList[0][0] == 'self':
del argsList[0]
else:
argsList=[]
def includeBlockMarkers():
if self.setting('includeBlockMarkers'):
startMarker = self.setting('blockMarkerStart')
self._compiler.addStrConst(startMarker[0] + methodName + startMarker[1])
# @@TR: fix up filtering
self._applyExpressionFilters(self[startPos:self.pos()], 'def', startPos=startPos)
if self.matchColonForSingleLineShortFormDirective():
isNestedDef = (self.setting('allowNestedDefScopes')
and [name for name in self._openDirectivesStack if name=='def'])
self.getc()
rawSignature = self[startPos:endOfFirstLinePos]
self._eatSingleLineDef(directiveName=directiveName,
methodName=methodName,
argsList=argsList,
startPos=startPos,
endPos=endOfFirstLinePos)
if directiveName == 'def' and not isNestedDef:
#@@TR: must come before _eatRestOfDirectiveTag ... for some reason
self._compiler.closeDef()
elif directiveName == 'block':
includeBlockMarkers()
self._compiler.closeBlock()
elif directiveName == 'closure' or isNestedDef:
self._compiler.dedent()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
else:
if self.peek()==':':
self.getc()
self.pushToOpenDirectivesStack(directiveName)
rawSignature = self[startPos:self.pos()]
self._eatMultiLineDef(directiveName=directiveName,
methodName=methodName,
argsList=argsList,
startPos=startPos,
isLineClearToStartToken=isLineClearToStartToken)
if directiveName == 'block':
includeBlockMarkers()
return methodName, rawSignature
def _eatMultiLineDef(self, directiveName, methodName, argsList, startPos,
isLineClearToStartToken=False):
# filtered in calling method
self.getExpression() # slurp up any garbage left at the end
signature = self[startPos:self.pos()]
endOfFirstLinePos = self.findEOL()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
parserComment = ('## CHEETAH: generated from ' + signature +
' at line %s, col %s' % self.getRowCol(startPos)
+ '.')
isNestedDef = (self.setting('allowNestedDefScopes')
and len([name for name in self._openDirectivesStack if name=='def'])>1)
if directiveName=='block' or (directiveName=='def' and not isNestedDef):
self._compiler.startMethodDef(methodName, argsList, parserComment)
else: #closure
self._useSearchList_orig = self.setting('useSearchList')
self.setSetting('useSearchList', False)
self._compiler.addClosure(methodName, argsList, parserComment)
return methodName
def _eatSingleLineDef(self, directiveName, methodName, argsList, startPos, endPos):
# filtered in calling method
fullSignature = self[startPos:endPos]
parserComment = ('## Generated from ' + fullSignature +
' at line %s, col %s' % self.getRowCol(startPos)
+ '.')
isNestedDef = (self.setting('allowNestedDefScopes')
and [name for name in self._openDirectivesStack if name=='def'])
if directiveName=='block' or (directiveName=='def' and not isNestedDef):
self._compiler.startMethodDef(methodName, argsList, parserComment)
else: #closure
# @@TR: temporary hack of useSearchList
useSearchList_orig = self.setting('useSearchList')
self.setSetting('useSearchList', False)
self._compiler.addClosure(methodName, argsList, parserComment)
self.getWhiteSpace(max=1)
self.parse(breakPoint=endPos)
if directiveName=='closure' or isNestedDef: # @@TR: temporary hack of useSearchList
self.setSetting('useSearchList', useSearchList_orig)
def eatExtends(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('extends'))
self.getWhiteSpace()
startPos = self.pos()
if self.setting('allowExpressionsInExtendsDirective'):
baseName = self.getExpression()
else:
baseName = self.getDottedName()
baseName = self._applyExpressionFilters(baseName, 'extends', startPos=startPos)
self._compiler.setBaseClass(baseName) # in compiler
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
def eatImplements(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('implements'))
self.getWhiteSpace()
startPos = self.pos()
methodName = self.getIdentifier()
if not self.atEnd() and self.peek() == '(':
argsList = self.getDefArgList()
self.advance() # past the closing ')'
if argsList and argsList[0][0] == 'self':
del argsList[0]
else:
argsList=[]
# @@TR: need to split up filtering of the methodname and the args
#methodName = self._applyExpressionFilters(methodName, 'implements', startPos=startPos)
self._applyExpressionFilters(self[startPos:self.pos()], 'implements', startPos=startPos)
self._compiler.setMainMethodName(methodName)
self._compiler.setMainMethodArgs(argsList)
        self.getExpression()  # throw away any unwanted crap that got added in
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
def eatSet(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
self.getDirectiveStartToken()
self.advance(3)
self.getWhiteSpace()
style = SET_LOCAL
if self.startswith('local'):
self.getIdentifier()
self.getWhiteSpace()
elif self.startswith('global'):
self.getIdentifier()
self.getWhiteSpace()
style = SET_GLOBAL
elif self.startswith('module'):
self.getIdentifier()
self.getWhiteSpace()
style = SET_MODULE
startsWithDollar = self.matchCheetahVarStart()
startPos = self.pos()
LVALUE = self.getExpression(pyTokensToBreakAt=assignmentOps, useNameMapper=False).strip()
OP = self.getAssignmentOperator()
RVALUE = self.getExpression()
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
expr = self._applyExpressionFilters(expr, 'set', startPos=startPos)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
class Components: pass # used for 'set global'
exprComponents = Components()
exprComponents.LVALUE = LVALUE
exprComponents.OP = OP
exprComponents.RVALUE = RVALUE
self._compiler.addSet(expr, exprComponents, style)
def eatSlurp(self):
if self.isLineClearToStartToken():
self._compiler.handleWSBeforeDirective()
self._compiler.commitStrConst()
self.readToEOL(gobble=True)
def eatEOLSlurpToken(self):
if self.isLineClearToStartToken():
self._compiler.handleWSBeforeDirective()
self._compiler.commitStrConst()
self.readToEOL(gobble=True)
def eatRaw(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('raw'))
self.getWhiteSpace()
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self.getWhiteSpace(max=1)
rawBlock = self.readToEOL(gobble=False)
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
rawBlock = self._eatToThisEndDirective('raw')
self._compiler.addRawText(rawBlock)
def eatInclude(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('include'))
self.getWhiteSpace()
includeFrom = 'file'
isRaw = False
if self.startswith('raw'):
self.advance(3)
isRaw=True
self.getWhiteSpace()
if self.startswith('source'):
self.advance(len('source'))
includeFrom = 'str'
self.getWhiteSpace()
if not self.peek() == '=':
raise ParseError(self)
self.advance()
startPos = self.pos()
sourceExpr = self.getExpression()
sourceExpr = self._applyExpressionFilters(sourceExpr, 'include', startPos=startPos)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self._compiler.addInclude(sourceExpr, includeFrom, isRaw)
def eatDefMacro(self):
# @@TR: not filtered yet
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('defmacro'))
self.getWhiteSpace()
if self.matchCheetahVarStart():
self.getCheetahVarStartToken()
macroName = self.getIdentifier()
self.getWhiteSpace()
if self.peek() == '(':
argsList = self.getDefArgList(useNameMapper=False)
self.advance() # past the closing ')'
if argsList and argsList[0][0] == 'self':
del argsList[0]
else:
argsList=[]
assert not self._directiveNamesAndParsers.has_key(macroName)
argsList.insert(0, ('src',None))
argsList.append(('parser','None'))
argsList.append(('macros','None'))
argsList.append(('compilerSettings','None'))
argsList.append(('isShortForm','None'))
argsList.append(('EOLCharsInShortForm','None'))
argsList.append(('startPos','None'))
argsList.append(('endPos','None'))
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self.getWhiteSpace(max=1)
macroSrc = self.readToEOL(gobble=False)
self.readToEOL(gobble=True)
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
macroSrc = self._eatToThisEndDirective('defmacro')
#print argsList
normalizedMacroSrc = ''.join(
['%def callMacro('+','.join([defv and '%s=%s'%(n,defv) or n
for n,defv in argsList])
+')\n',
macroSrc,
'%end def'])
from Cheetah.Template import Template
templateAPIClass = self.setting('templateAPIClassForDefMacro', default=Template)
compilerSettings = self.setting('compilerSettingsForDefMacro', default={})
searchListForMacros = self.setting('searchListForDefMacro', default=[])
searchListForMacros = list(searchListForMacros) # copy to avoid mutation bugs
searchListForMacros.append({'macros':self._macros,
'parser':self,
'compilerSettings':self.settings(),
})
templateAPIClass._updateSettingsWithPreprocessTokens(
compilerSettings, placeholderToken='@', directiveToken='%')
macroTemplateClass = templateAPIClass.compile(source=normalizedMacroSrc,
compilerSettings=compilerSettings)
#print normalizedMacroSrc
#t = macroTemplateClass()
#print t.callMacro('src')
#print t.generatedClassCode()
class MacroDetails: pass
macroDetails = MacroDetails()
macroDetails.macroSrc = macroSrc
macroDetails.argsList = argsList
macroDetails.template = macroTemplateClass(searchList=searchListForMacros)
self._macroDetails[macroName] = macroDetails
self._macros[macroName] = macroDetails.template.callMacro
self._directiveNamesAndParsers[macroName] = self.eatMacroCall
def eatMacroCall(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
startPos = self.pos()
self.getDirectiveStartToken()
macroName = self.getIdentifier()
macro = self._macros[macroName]
if hasattr(macro, 'parse'):
return macro.parse(parser=self, startPos=startPos)
if hasattr(macro, 'parseArgs'):
args = macro.parseArgs(parser=self, startPos=startPos)
else:
self.getWhiteSpace()
args = self.getExpression(useNameMapper=False,
pyTokensToBreakAt=[':']).strip()
if self.matchColonForSingleLineShortFormDirective():
isShortForm = True
self.advance() # skip over :
self.getWhiteSpace(max=1)
srcBlock = self.readToEOL(gobble=False)
EOLCharsInShortForm = self.readToEOL(gobble=True)
#self.readToEOL(gobble=False)
else:
isShortForm = False
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
srcBlock = self._eatToThisEndDirective(macroName)
if hasattr(macro, 'convertArgStrToDict'):
kwArgs = macro.convertArgStrToDict(args, parser=self, startPos=startPos)
else:
def getArgs(*pargs, **kws):
return pargs, kws
exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals()
assert not kwArgs.has_key('src')
kwArgs['src'] = srcBlock
if type(macro)==new.instancemethod:
co = macro.im_func.func_code
elif (hasattr(macro, '__call__')
and hasattr(macro.__call__, 'im_func')):
co = macro.__call__.im_func.func_code
else:
co = macro.func_code
availableKwArgs = inspect.getargs(co)[0]
if 'parser' in availableKwArgs:
kwArgs['parser'] = self
if 'macros' in availableKwArgs:
kwArgs['macros'] = self._macros
if 'compilerSettings' in availableKwArgs:
kwArgs['compilerSettings'] = self.settings()
if 'isShortForm' in availableKwArgs:
kwArgs['isShortForm'] = isShortForm
if isShortForm and 'EOLCharsInShortForm' in availableKwArgs:
kwArgs['EOLCharsInShortForm'] = EOLCharsInShortForm
if 'startPos' in availableKwArgs:
kwArgs['startPos'] = startPos
if 'endPos' in availableKwArgs:
kwArgs['endPos'] = self.pos()
srcFromMacroOutput = macro(**kwArgs)
origParseSrc = self._src
origBreakPoint = self.breakPoint()
origPos = self.pos()
# add a comment to the output about the macro src that is being parsed
# or add a comment prefix to all the comments added by the compiler
self._src = srcFromMacroOutput
self.setPos(0)
self.setBreakPoint(len(srcFromMacroOutput))
self.parse(assertEmptyStack=False)
self._src = origParseSrc
self.setBreakPoint(origBreakPoint)
self.setPos(origPos)
#self._compiler.addRawText('end')
def eatCache(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
self.advance(len('cache'))
startPos = self.pos()
argList = self.getDefArgList(useNameMapper=True)
argList = self._applyExpressionFilters(argList, 'cache', startPos=startPos)
def startCache():
cacheInfo = self._compiler.genCacheInfoFromArgList(argList)
self._compiler.startCacheRegion(cacheInfo, lineCol)
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self.getWhiteSpace(max=1)
startCache()
self.parse(breakPoint=self.findEOL(gobble=True))
self._compiler.endCacheRegion()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self.pushToOpenDirectivesStack('cache')
startCache()
def eatCall(self):
# @@TR: need to enable single line version of this
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
self.advance(len('call'))
startPos = self.pos()
useAutocallingOrig = self.setting('useAutocalling')
self.setSetting('useAutocalling', False)
self.getWhiteSpace()
if self.matchCheetahVarStart():
functionName = self.getCheetahVar()
else:
functionName = self.getCheetahVar(plain=True, skipStartToken=True)
self.setSetting('useAutocalling', useAutocallingOrig)
# @@TR: fix up filtering
self._applyExpressionFilters(self[startPos:self.pos()], 'call', startPos=startPos)
self.getWhiteSpace()
args = self.getExpression(pyTokensToBreakAt=[':']).strip()
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self._compiler.startCallRegion(functionName, args, lineCol)
self.getWhiteSpace(max=1)
self.parse(breakPoint=self.findEOL(gobble=False))
self._compiler.endCallRegion()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self.pushToOpenDirectivesStack("call")
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self._compiler.startCallRegion(functionName, args, lineCol)
def eatCallArg(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
self.advance(len('arg'))
startPos = self.pos()
self.getWhiteSpace()
argName = self.getIdentifier()
self.getWhiteSpace()
argName = self._applyExpressionFilters(argName, 'arg', startPos=startPos)
self._compiler.setCallArg(argName, lineCol)
if self.peek() == ':':
self.getc()
else:
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
def eatFilter(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('filter'))
self.getWhiteSpace()
startPos = self.pos()
if self.matchCheetahVarStart():
isKlass = True
theFilter = self.getExpression(pyTokensToBreakAt=[':'])
else:
isKlass = False
theFilter = self.getIdentifier()
self.getWhiteSpace()
theFilter = self._applyExpressionFilters(theFilter, 'filter', startPos=startPos)
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self.getWhiteSpace(max=1)
self._compiler.setFilter(theFilter, isKlass)
self.parse(breakPoint=self.findEOL(gobble=False))
self._compiler.closeFilterBlock()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self.pushToOpenDirectivesStack("filter")
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self._compiler.setFilter(theFilter, isKlass)
def eatErrorCatcher(self):
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
self.getDirectiveStartToken()
self.advance(len('errorCatcher'))
self.getWhiteSpace()
startPos = self.pos()
errorCatcherName = self.getIdentifier()
errorCatcherName = self._applyExpressionFilters(
errorCatcherName, 'errorcatcher', startPos=startPos)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self._compiler.setErrorCatcher(errorCatcherName)
def eatCapture(self):
# @@TR: this could be refactored to use the code in eatSimpleIndentingDirective
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLinePos = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
self.advance(len('capture'))
startPos = self.pos()
self.getWhiteSpace()
expr = self.getExpression(pyTokensToBreakAt=[':'])
expr = self._applyExpressionFilters(expr, 'capture', startPos=startPos)
if self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
self.getWhiteSpace(max=1)
self.parse(breakPoint=self.findEOL(gobble=False))
self._compiler.endCaptureRegion()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
self.pushToOpenDirectivesStack("capture")
self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol)
def eatIf(self):
# filtered
isLineClearToStartToken = self.isLineClearToStartToken()
endOfFirstLine = self.findEOL()
lineCol = self.getRowCol()
self.getDirectiveStartToken()
startPos = self.pos()
expressionParts = self.getExpressionParts(pyTokensToBreakAt=[':'])
expr = ''.join(expressionParts).strip()
expr = self._applyExpressionFilters(expr, 'if', startPos=startPos)
isTernaryExpr = ('then' in expressionParts and 'else' in expressionParts)
if isTernaryExpr:
conditionExpr = []
trueExpr = []
falseExpr = []
currentExpr = conditionExpr
for part in expressionParts:
if part.strip()=='then':
currentExpr = trueExpr
elif part.strip()=='else':
currentExpr = falseExpr
else:
currentExpr.append(part)
conditionExpr = ''.join(conditionExpr)
trueExpr = ''.join(trueExpr)
falseExpr = ''.join(falseExpr)
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
self._compiler.addTernaryExpr(conditionExpr, trueExpr, falseExpr, lineCol=lineCol)
elif self.matchColonForSingleLineShortFormDirective():
self.advance() # skip over :
self._compiler.addIf(expr, lineCol=lineCol)
self.getWhiteSpace(max=1)
self.parse(breakPoint=self.findEOL(gobble=True))
self._compiler.commitStrConst()
self._compiler.dedent()
else:
if self.peek()==':':
self.advance()
self.getWhiteSpace()
self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
self.pushToOpenDirectivesStack('if')
self._compiler.addIf(expr, lineCol=lineCol)
## end directive handlers
def handleEndDef(self):
isNestedDef = (self.setting('allowNestedDefScopes')
and [name for name in self._openDirectivesStack if name=='def'])
if not isNestedDef:
self._compiler.closeDef()
else:
# @@TR: temporary hack of useSearchList
self.setSetting('useSearchList', self._useSearchList_orig)
self._compiler.commitStrConst()
self._compiler.dedent()
###
def pushToOpenDirectivesStack(self, directiveName):
assert directiveName in self._closeableDirectives
self._openDirectivesStack.append(directiveName)
def popFromOpenDirectivesStack(self, directiveName):
if not self._openDirectivesStack:
raise ParseError(self, msg="#end found, but nothing to end")
if self._openDirectivesStack[-1] == directiveName:
del self._openDirectivesStack[-1]
else:
raise ParseError(self, msg="#end %s found, expected #end %s" %(
directiveName, self._openDirectivesStack[-1]))
def assertEmptyOpenDirectivesStack(self):
if self._openDirectivesStack:
errorMsg = (
"Some #directives are missing their corresponding #end ___ tag: %s" %(
', '.join(self._openDirectivesStack)))
raise ParseError(self, msg=errorMsg)
Parser = _HighLevelParser
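# Hedged usage sketch: this parser is normally driven indirectly through the
# public Cheetah.Template API rather than instantiated by hand; the template
# source below is illustrative only.
#
#     from Cheetah.Template import Template
#     print(Template('#def greet($who)\nHello $who\n#end def\n$greet("world")'))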
|
import re
import sys
import time
import socket
import urllib2
def is_valid_ip(addr):
"""
Thanks to @Markus Jarderot on Stack Overflow
http://stackoverflow.com/a/319293
"""
return is_valid_ipv4(addr) or is_valid_ipv6(addr)
def is_valid_ipv4(addr):
"""
Thanks to @Markus Jarderot on Stack Overflow
http://stackoverflow.com/a/319293
"""
pattern = re.compile(r"""
^
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hex 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 0-3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){0,3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(addr) is not None
def is_valid_ipv6(addr):
"""
Thanks to @Markus Jarderot on Stack Overflow
http://stackoverflow.com/a/319293
"""
pattern = re.compile(r"""
^
\s* # Leading whitespace
            (?!.*::.*::)                     # Only a single wildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most 4 hexadecimal digits
            (?:(?<=::)|(?<!::):)             # Colon unless preceded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
            (?:(?<=::)|(?<!::):)             # Colon unless preceded by wildcard
            [0-9a-f]{0,4}                    # Last group
            (?: (?<=::)                      # Colon iff preceded by exactly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
return pattern.match(addr) is not None
def ip_is_resolvable(ip_addr):
try:
return socket.gethostbyaddr(ip_addr)
except (socket.herror, socket.gaierror):
return None
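# Hedged sanity checks (illustrative values, not part of the original module):
# is_valid_ipv4('192.168.0.1') -> True, is_valid_ipv4('999.1.1.1') -> False,
# is_valid_ipv6('::1') -> True. ip_is_resolvable returns the full
# (hostname, aliaslist, ipaddrlist) triple from gethostbyaddr, or None on failure.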
def chunk_report(bytes_so_far, chunk_size, total_size):
percent = float(bytes_so_far) / total_size
if percent:
equals = '=' * int(percent * 39)
else:
equals = ''
equals += '>'
space = ' ' * (39 - len(equals))
percentage = '[%s%s] ' % (equals, space)
str_percent = str(int(percent * 100)) + '%'
text = "{:<4}{} {:,}\r".format(str_percent, percentage, bytes_so_far)
# slick way to print text on a single line repeatedly
print text,
if bytes_so_far >= total_size:
print '\n'
def chunk_read(response, chunk_size=8192, report_hook=None):
total_size = int(response.info().getheader('Content-Length').strip())
bytes_so_far = 0
while True:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return bytes_so_far
def num_fmt(num):
# ignore anything smaller than a kilobyte
num /= 1024.0
    if num < 1.0:
        return
for x in ['K', 'M', 'G']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'T')
def wget(args):
    url = args[0]
    if '://' not in url:
        url = 'http://' + url
    url_s = urllib2.urlparse.urlsplit(url)[1]  # host portion, scheme stripped
    localtime = str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
    print '--%s-- %s' % (localtime, url)
    if is_valid_ip(url_s):
        host = url_s
    else:
        host = url_s + ' (%s)' % url_s
sys.stdout.write('Resolving %s... ' % host)
addr = ip_is_resolvable(url_s)
if not addr:
print 'failed: Name or service not known.'
print 'wget: unable to resolve host address `%s\'' % url_s
return
else:
print addr[2][0]
host = host + '|%s|:80' % addr[2][0]
sys.stdout.write('Connecting to %s... ' % host)
try:
connection = urllib2.urlopen(url)
except: # make this smarter
print 'failed.'
return
print 'connected.'
print 'HTTP request sent, awaiting response... %s %s' % \
(connection.code, connection.msg)
ddl_size = connection.info().getheader('content-length')
size_fmt = num_fmt(int(ddl_size))
if size_fmt:
        text = 'Length: %s (%s) [%s]' % (ddl_size, size_fmt,
                                         connection.info().type)
else:
text = 'Length: %s [%s]' % (ddl_size, connection.info().type)
print text
ddl_file = connection.info().getheader('content-disposition')
if not ddl_file:
ddl_file = urllib2.urlparse.urlsplit(url)[2].split('/')
ddl_file = ddl_file[len(ddl_file) - 1]
if not ddl_file:
ddl_file = 'index.html'
print "Saving to: '%s'\n" % ddl_file
    start_time = time.time()
    bytes_read = chunk_read(connection, report_hook=chunk_report)
    elapsed = max(time.time() - start_time, 1e-6)
    speed = '%.2f MB/s' % (bytes_read / elapsed / (1024.0 * 1024.0))
    localtime = str(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
print "%s (%s) - '%s' saved [%s/%s]" % (localtime, speed, ddl_file,
ddl_size, ddl_size)
print ''
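if __name__ == '__main__':
    # Hedged entry point (not in the original script; the file name is an
    # illustrative stand-in): mimic wget's CLI,
    # e.g. `python pywget.py example.com/index.html`.
    wget(sys.argv[1:])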
|
"""
Python library for the uh.cx link shortener.
http://uh.cx/
"""
import json
import requests
class Manager:
_url = 'http://uh.cx/api/create'
class Link:
url_original = ''
url_redirect = ''
url_preview = ''
qr_redirect = ''
qr_preview = ''
class InvalidResponseException(Exception):
pass
class CouldNotCreateLinkException(Exception):
pass
class ResponseValidator:
_keys = ['QrDirect', 'QrPreview', 'UrlDirect', 'UrlOriginal', 'UrlPreview']
@staticmethod
def check(response):
for key in Manager.ResponseValidator._keys:
if key not in response:
return False
return True
@staticmethod
def create(url):
data = json.dumps({'url': url})
response = requests.post(Manager._url, data)
if response.status_code != 200:
raise Manager.CouldNotCreateLinkException()
response_data = response.json()
if not Manager.ResponseValidator.check(response_data):
raise Manager.InvalidResponseException()
link = Manager.Link()
link.qr_preview = response_data['QrPreview']
link.qr_redirect = response_data['QrDirect']
link.url_original = response_data['UrlOriginal']
link.url_preview = response_data['UrlPreview']
link.url_redirect = response_data['UrlDirect']
return link
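if __name__ == '__main__':
    # Hedged usage sketch (performs a real network call to the uh.cx API;
    # the URL below is illustrative):
    link = Manager.create('https://example.com/')
    print(link.url_redirect)
    print(link.qr_redirect)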
|
from tkinter import *
from tkinter import ttk
from functools import partial
from errors import InputError
class GUI:
def __init__(self, game):
# Initialise needed variables
self.game = game
self.no_dot = True
self.payment_flag = False
# Create window
self.root = Tk()
self.root.title("Monopoly Calculator")
self.root.resizable(FALSE, FALSE)
# Create main frame
self.main_frame = ttk.Frame(self.root, borderwidth=3, relief='raised')
self.main_frame.grid(column=0, row=0, sticky=(N, S, E, W))
# Create subframes
# Make "dot matrix" subframe
self.calc_matrix = ttk.Frame(
self.main_frame, borderwidth=3, relief='sunken')
self.calc_matrix.grid(column=0, row=0, sticky=(N, W, E))
# Make button subframe
self.button_frame = ttk.Frame(self.main_frame)
self.button_frame.grid(column=0, row=2, sticky=(S, W))
# Make info panel subframe
self.info_panel = ttk.Frame(
self.main_frame, borderwidth=10, relief='sunken')
self.info_panel.grid(column=2, row=0, rowspan=3, sticky=(N, E))
# Create labels for player list
self.player_list = ttk.Treeview(self.info_panel,
columns=('money', 'pk'),
                                        displaycolumns=('money',),
selectmode='browse')
# Name the column headers
self.player_list.heading('#0', text='Name')
self.player_list.heading('money', text='Money')
# Populate tree
self.draw_player_list()
self.player_list.grid(column=0, row=0)
# Create labels for input and quantifier
self.number = StringVar()
self.quantifier = StringVar()
ttk.Label(self.calc_matrix, textvariable=self.number,
width=40, anchor=CENTER).grid(column=0, columnspan=3, row=0)
ttk.Label(self.calc_matrix, textvariable=self.quantifier,
anchor=E).grid(column=2, row=0, sticky=E)
# First row buttons
(ttk
.Button(self.button_frame, text="M",
command=partial(self.quantifier_input, "M"))
.grid(column=0, row=1))
(ttk
.Button(self.button_frame, text="←", command=self.backspace)
.grid(column=1, row=1))
(ttk
.Button(self.button_frame, text="K",
command=partial(self.quantifier_input, "K"))
.grid(column=2, row=1))
# Number grid
for i in range(0, 3):
for j in range(0, 3):
value = str((3 * i) + j + 1)
(ttk
.Button(self.button_frame, text=value,
command=partial(self.number_input, value))
.grid(column=j, row=i + 2))
# After number grid row buttons
(ttk
.Button(self.button_frame, text="C", command=self.clear_calc)
.grid(column=0, row=5))
(ttk
.Button(self.button_frame, text="0",
command=partial(self.number_input, "0"))
.grid(column=1, row=5))
(ttk
.Button(self.button_frame, text=".", command=self.dot_input)
.grid(column=2, row=5))
# Bottom row buttons
(ttk
.Button(self.button_frame, text="+", command=self.plus_clicked)
.grid(column=0, row=6))
(ttk
.Button(self.button_frame, text="+++", command=self.payment)
.grid(column=1, row=6))
(ttk
.Button(self.button_frame, text="-", command=self.minus_clicked)
.grid(column=2, row=6))
# Configure button frame paddings
for child in self.button_frame.winfo_children():
child.grid_configure(padx=5, pady=5)
# Make keybindings
# Number keybindings
for i in range(0, 10):
self.root.bind(str(i), partial(self.number_input, str(i)))
# Dot bind
self.root.bind('.', self.dot_input)
# Clear calc binds
self.root.bind('c', self.clear_calc)
self.root.bind('C', self.clear_calc)
# Quantifier binds
self.root.bind('m', partial(self.quantifier_input, 'M'))
self.root.bind('M', partial(self.quantifier_input, 'M'))
self.root.bind('k', partial(self.quantifier_input, 'K'))
self.root.bind('K', partial(self.quantifier_input, 'K'))
# Backspace bind
self.root.bind('<BackSpace>', self.backspace)
# Payment binds
self.root.bind('+', self.plus_clicked)
self.root.bind('-', self.minus_clicked)
self.root.bind('<space>', self.payment)
self.root.mainloop()
def number_input(self, *args):
self.number.set(self.number.get() + args[0])
def quantifier_input(self, *args):
self.quantifier.set(args[0])
def clear_calc(self, *args):
self.number.set("")
self.quantifier.set("")
self.no_dot = True
def dot_input(self, *args):
if self.no_dot and self.number.get() != "":
self.number_input(".")
self.no_dot = False
def backspace(self, *args):
old_value = self.number.get()
rest = old_value[:-1]
self.number.set(rest)
def plus_clicked(self, *args):
if self.payment_flag:
self.game.collect_pool_money(self.get_selected_player())
self.update_player_list()
self.payment_flag = False
else:
amount, quantifier = self.get_input()
self.game.add_money(self.get_selected_player(), amount, quantifier)
self.update_player_list()
def payment(self, *args):
self.payment_flag = True
amount, quantifier = self.get_input()
self.game.deduct_money(self.get_selected_player(), amount, quantifier)
self.game.pool_money(amount, quantifier)
self.update_player_list()
def minus_clicked(self, *args):
amount, quantifier = self.get_input()
self.game.deduct_money(self.get_selected_player(), amount, quantifier)
self.update_player_list()
def get_selected_player(self):
return self.player_list.focus()
def update_player_list(self):
for player in self.game.get_players():
self.player_list.delete(player.get_name())
self.draw_player_list()
self.clear_calc()
def draw_player_list(self):
for player in self.game.get_players():
self.player_list.insert('', 'end', player.get_name(),
text=player.get_name(),
values=(player.get_balance_display(),
player.get_pk()),
                                    tags=(player.get_pk(),))
def get_input(self):
amount = self.number.get()
quantifier = self.quantifier.get()
if quantifier == "":
raise InputError("Place a valid quantifier!")
return amount, quantifier
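# Hedged launch sketch: GUI expects a game object exposing the methods used
# above (get_players, add_money, deduct_money, pool_money, collect_pool_money),
# with each player exposing get_name/get_balance_display/get_pk. The names
# below are illustrative stand-ins, not this project's real classes.
#
#     from game import Game
#     GUI(Game(players=['Alice', 'Bob']))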
|
import sys
table = []
seam = 0
holder = []
def Read_File():
    # read one comma-separated row of values per line into the global holder
    with open(sys.argv[1], "r") as infile:
        for line in infile:
            holder.append(line.strip().split(", "))
    return holder
def Create_Table(holder):
    global seam
    # build the cumulative-cost table row by row
    table.append(holder[0])
    for i in range(1, len(holder)):
        table.append([])
        for j in range(len(holder[i])):
            if j == 0:
                table[i].append(float(holder[i][j]) + min(float(table[i-1][j]), float(table[i-1][j+1])))
            elif j == (len(holder[i])-1):
                table[i].append(float(holder[i][j]) + min(float(table[i-1][j]), float(table[i-1][j-1])))
            else:
                table[i].append(float(holder[i][j]) + min(float(table[i-1][j-1]), float(table[i-1][j]), float(table[i-1][j+1])))
    # find the cheapest seam end in the bottom row
    thing = float("inf")
    for i in range(len(table[-1])):
        if float(table[-1][i]) < thing:
            thing = float(table[-1][i])
            seam = i
    # walk back up, always moving to the adjacent column with the lowest
    # cumulative cost (ties resolve to staying in the same column)
    winner = seam
    trace = [[len(table)-1, winner, float(holder[-1][winner])]]
    for i in range(len(table)-2, -1, -1):
        candidates = [winner]
        if winner > 0:
            candidates.append(winner - 1)
        if winner < len(table[i]) - 1:
            candidates.append(winner + 1)
        winner = min(candidates, key=lambda j: float(table[i][j]))
        trace.append([i, winner, float(holder[i][winner])])
    return trace
def Write_to_File(s):
    input_name = sys.argv[1][:-4]
    filename = input_name + '_trace.txt'
    with open(filename, 'w') as output:
        output.write("Min Seam: ")
        output.write(str(table[-1][seam]))
        output.write('\n')
        for row in s:
            output.write(str(row))
            output.write('\n')
Write_to_File(Create_Table(Read_File()))
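# Hedged usage note (assumptions about the expected input format): run as
# `python seam.py costs.txt`, where costs.txt holds one comma-separated row of
# energy values per line; the minimum vertical seam and its trace are written
# to costs_trace.txt.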
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['behavir_tree_visualizer'],
package_dir={'': 'src'}
)
setup(**d)
|
import pytest
def test_operator_and(env):
def process(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
results = yield timeout[0] & timeout[1] & timeout[2]
assert results == {
timeout[0]: 0,
timeout[1]: 1,
timeout[2]: 2,
}
env.process(process(env))
env.run()
def test_operator_and_blocked(env):
def process(env):
timeout = env.timeout(1)
event = env.event()
yield env.timeout(1)
condition = timeout & event
assert not condition.triggered
env.process(process(env))
env.run()
def test_operator_or(env):
def process(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
results = yield timeout[0] | timeout[1] | timeout[2]
assert results == {
timeout[0]: 0,
}
env.process(process(env))
env.run()
def test_operator_nested_and(env):
def process(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
results = yield (timeout[0] & timeout[2]) | timeout[1]
assert results == {
timeout[0]: 0,
timeout[1]: 1,
}
assert env.now == 1
env.process(process(env))
env.run()
def test_operator_nested_or(env):
def process(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
results = yield (timeout[0] | timeout[1]) & timeout[2]
assert results == {
timeout[0]: 0,
timeout[1]: 1,
timeout[2]: 2,
}
assert env.now == 2
env.process(process(env))
env.run()
def test_nested_cond_with_error(env):
def explode(env):
yield env.timeout(1)
raise ValueError('Onoes!')
def process(env):
try:
yield env.process(explode(env)) & env.timeout(1)
pytest.fail('The condition should have raised a ValueError')
except ValueError as err:
assert err.args == ('Onoes!',)
env.process(process(env))
env.run()
def test_cond_with_error(env):
def explode(env, delay):
yield env.timeout(delay)
raise ValueError('Onoes, failed after %d!' % delay)
def process(env):
try:
yield env.process(explode(env, 0)) | env.timeout(1)
pytest.fail('The condition should have raised a ValueError')
except ValueError as err:
assert err.args == ('Onoes, failed after 0!',)
env.process(process(env))
env.run()
def test_cond_with_nested_error(env):
def explode(env, delay):
yield env.timeout(delay)
raise ValueError('Onoes, failed after %d!' % delay)
def process(env):
try:
yield (env.process(explode(env, 0)) & env.timeout(1) |
env.timeout(1))
pytest.fail('The condition should have raised a ValueError')
except ValueError as err:
assert err.args == ('Onoes, failed after 0!',)
env.process(process(env))
env.run()
def test_cond_with_uncaught_error(env):
"""Errors that happen after the condition has been triggered will not be
handled by the condition and cause the simulation to crash."""
def explode(env, delay):
yield env.timeout(delay)
raise ValueError('Onoes, failed after %d!' % delay)
def process(env):
yield env.timeout(1) | env.process(explode(env, 2))
env.process(process(env))
try:
env.run()
assert False, 'There should have been an exception.'
except ValueError:
pass
assert env.now == 2
def test_iand_with_and_cond(env):
def process(env):
cond = env.timeout(1, value=1) & env.timeout(2, value=2)
orig = cond
cond &= env.timeout(0, value=0)
assert cond is not orig
results = yield cond
assert list(results.values()) == [1, 2, 0]
env.process(process(env))
env.run()
def test_iand_with_or_cond(env):
def process(env):
cond = env.timeout(1, value=1) | env.timeout(2, value=2)
orig = cond
cond &= env.timeout(0, value=0)
assert cond is not orig
results = yield cond
assert list(results.values()) == [1, 0]
env.process(process(env))
env.run()
def test_ior_with_or_cond(env):
def process(env):
cond = env.timeout(1, value=1) | env.timeout(2, value=2)
orig = cond
cond |= env.timeout(0, value=0)
assert cond is not orig
results = yield cond
assert list(results.values()) == [0]
env.process(process(env))
env.run()
def test_ior_with_and_cond(env):
def process(env):
cond = env.timeout(1, value=1) & env.timeout(2, value=2)
orig = cond
cond |= env.timeout(0, value=0)
assert cond is not orig
results = yield cond
assert list(results.values()) == [0]
env.process(process(env))
env.run()
def test_immutable_results(env):
"""Results of conditions should not change after they have been
triggered."""
def process(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
# The or condition in this expression will trigger immediately. The and
# condition will trigger later on.
condition = timeout[0] | (timeout[1] & timeout[2])
results = yield condition
assert results == {timeout[0]: 0}
# Make sure that the results of condition were frozen. The results of
# the nested and condition do not become visible afterwards.
yield env.timeout(2)
assert results == {timeout[0]: 0}
env.process(process(env))
env.run()
def test_shared_and_condition(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
c1 = timeout[0] & timeout[1]
c2 = c1 & timeout[2]
def p1(env, condition):
results = yield condition
assert results == {timeout[0]: 0, timeout[1]: 1}
def p2(env, condition):
results = yield condition
assert results == {timeout[0]: 0, timeout[1]: 1, timeout[2]: 2}
env.process(p1(env, c1))
env.process(p2(env, c2))
env.run()
def test_shared_or_condition(env):
timeout = [env.timeout(delay, value=delay) for delay in range(3)]
c1 = timeout[0] | timeout[1]
c2 = c1 | timeout[2]
def p1(env, condition):
results = yield condition
assert results == {timeout[0]: 0}
def p2(env, condition):
results = yield condition
assert results == {timeout[0]: 0}
env.process(p1(env, c1))
env.process(p2(env, c2))
env.run()
def test_condition_value(env):
"""The value of a condition behaves like a readonly dictionary."""
    timeouts = [env.timeout(delay, value=delay) for delay in range(3)]
def p(env, timeouts):
results = yield env.all_of(timeouts)
assert list(results) == timeouts
assert list(results.keys()) == timeouts
assert list(results.values()) == [0, 1, 2]
assert list(results.items()) == list(zip(timeouts, [0, 1, 2]))
assert timeouts[0] in results
assert results[timeouts[0]] == 0
assert results == results
assert results == results.todict()
env.process(p(env, timeouts))
env.run()
def test_result_order(env):
"""The order of a conditions result is based on the order in which the
events have been specified."""
timeouts = list(reversed([env.timeout(delay) for delay in range(3)]))
def p(env, timeouts):
results = yield env.all_of(timeouts)
assert list(results.keys()) == timeouts
env.process(p(env, timeouts))
env.run()
def test_nested_result_order(env):
"""The order of a conditions result is based on the order in which the
events have been specified (even if nested)."""
timeouts = [env.timeout(delay) for delay in range(3)]
condition = (timeouts[0] | timeouts[1]) & timeouts[2]
def p(env, timeouts):
results = yield condition
assert list(results.keys()) == timeouts
env.process(p(env, timeouts))
env.run()
|
import cPickle
import numpy as np
def generate_data(pkl_path, norm, sz, maxlen, step, mm, reverse):
X = cPickle.load(open(pkl_path, 'rb'))[:mm+1]
if reverse:
X.reverse()
X = np.array(X)
mins = []
maxs = []
if norm == 'minmax':
mins = X.min(axis=0, keepdims=True)
maxs = X.max(axis=0, keepdims=True) + 1e-4
X = (X-mins) / (maxs - mins)
X[X>1] = 0
X = X*2 -1
if maxlen is None:
maxlen = (X.shape[0]-1)
    maxlen = min(maxlen+1, X.shape[0]) // step
assert(maxlen > 1)
# data = np.zeros([step, maxlen, X.shape[1]])
data = []
for i in range(step):
data.append( X[i:X.shape[0]:step][:maxlen])
if sz is None:
sz = len(data)
while len(data) < sz:
data.extend(data)
data = np.array(data[:sz])
data += np.random.random(data.shape)/100
np.random.shuffle(data)
X = data[:, :maxlen-1, :]
Y = data[:, -1, :]
return (X, Y, mins, maxs) # [timerange/maxlen, maxlen, 5]
def load_data(pkl_path, norm='minmax', sz=None, maxlen=None, step=1, mm=5000, reverse=False):
data = generate_data(pkl_path, norm, sz, maxlen, step, mm, reverse)
return data
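# Hedged usage sketch ('prices.pkl' is a hypothetical pickled 2-D series):
#
#     X, Y, mins, maxs = load_data('prices.pkl', norm='minmax', maxlen=50, step=2)
#
# X holds [sz, maxlen-1, n_features] training windows, Y the final frame of
# each window, and mins/maxs the min-max bounds needed to de-normalise later.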
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import datetime
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ku7#3vgn-&ym68y^o72&j@j=23m&0hu2=u&&!8f^i0ei(o^j7('
DEBUG = True
PAGE_CACHE_SECONDS = 1
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'accounts'
]
MIDDLEWARE = [
# CORS for REST_FRAMEWORK
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bbs',
'USER': 'clovemac',
'PASSWORD': 'office',
'HOST': 'localhost',
'PORT': '5432',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'accounts.User'
STATIC_URL = '/dist/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'dist'),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
REST_SESSION_LOGIN = False
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.AllowAny',
    ),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
        # add token authentication
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 20,
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
    ),
}
JWT_AUTH = {
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_AUTH_HEADER_PREFIX': 'Bearer',
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=14)
}
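# Hedged client-usage note: with TokenAuthentication enabled above, API clients
# authenticate by sending an `Authorization: Token <key>` header; if the
# commented-out JWT backend is enabled instead, the JWT_AUTH settings above
# switch the expected header to `Authorization: Bearer <jwt>`.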
|
import datoszs.commands
from spiderpig import run_cli
if __name__ == "__main__":
run_cli(
command_packages=[datoszs.commands],
)
|
import codecs
import sys
def tidy(text):
return text.replace(u"\u00A0", " ").replace("$%", "<strong>").replace('%$','</strong>')
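# Illustrative example (hedged): tidy(u'a\u00A0b $%bold%$') returns
# u'a b <strong>bold</strong>'; non-breaking spaces become plain spaces and the
# $%...%$ markers become <strong> tags.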
if __name__ == '__main__':
filename = sys.argv[1]
with codecs.open(filename, encoding='utf8') as infile:
text = tidy(infile.read())
with codecs.open(filename, 'w', encoding='utf8') as outfile:
outfile.write(text)
|
"""Integration with insteon devices."""
import json
import logging
import urllib
import time
from google.appengine.ext import ndb
from appengine import account, device, rest
@device.register('insteon_switch')
class InsteonSwitch(device.Switch):
"""Class represents a Insteon switch."""
insteon_device_id = ndb.IntegerProperty()
def handle_event(self, event):
self.account = event['account']
self.device_name = event['DeviceName']
self.insteon_device_id = event['DeviceID']
def sync(self):
"""Update the state of a light."""
my_account = self.find_account()
if not my_account:
logging.info("Couldn't find account.")
return
command = 'fast_on' if self.state else 'fast_off'
command = my_account.send_command(command, device_id=self.insteon_device_id)
logging.info(command)
@account.register('insteon')
class InsteonAccount(account.Account):
"""Class represents a Insteon account."""
BASE_URL = 'https://connect.insteon.com'
AUTH_URL = (BASE_URL + '/api/v2/oauth2/auth?' +
'client_id=%(client_id)s&state=%(state)s&'
'response_type=code&redirect_uri=' + account.REDIRECT_URL)
ACCESS_TOKEN_URL = (BASE_URL + '/api/v2/oauth2/token')
COMMAND_RETRIES = 10
def __init__(self, *args, **kwargs):
super(InsteonAccount, self).__init__(*args, **kwargs)
# pylint: disable=invalid-name
from common import creds
self.CLIENT_ID = creds.INSTEON_CLIENT_ID
self.CLIENT_SECRET = creds.INSTEON_CLIENT_SECRET
def _get_auth_headers(self):
return {'Authentication': 'APIKey %s' % self.CLIENT_ID,
'Authorization': 'Bearer %s' % self.access_token}
def _get_refresh_data(self):
values = {'client_id': self.CLIENT_ID,
'client_secret': self.CLIENT_SECRET}
if self.refresh_token is None:
values['grant_type'] = 'authorization_code'
values['code'] = self.auth_code
# Don't provide a redirect, or you'll get a 401
values['redirect_uri'] = ''
else:
values['grant_type'] = 'refresh_token'
values['refresh_token'] = self.refresh_token
return urllib.urlencode(values)
def get_human_type(self):
return 'Insteon'
@rest.command
def send_command(self, command, **kwargs):
"""Utility to send commands to API."""
if self.access_token is None:
logging.info('No access token, can\'t send command.')
return
kwargs['command'] = command
payload = json.dumps(kwargs)
result = self.do_request(
self.BASE_URL + '/api/v2/commands', method='POST', payload=payload,
headers={'Content-Type': 'application/json'})
logging.info(result)
state = None
for _ in range(self.COMMAND_RETRIES):
state = self.do_request(self.BASE_URL + result['link'])
logging.info(state)
assert state['status'] != 'failed'
            if state['status'] == 'succeeded':
break
time.sleep(1)
return state
@rest.command
def refresh_devices(self):
if self.access_token is None:
logging.info('No access token, skipping.')
return
devices = self.do_request(self.BASE_URL + '/api/v2/devices?properties=all')
logging.info(devices)
events = []
for entry in devices['DeviceList']:
            # This covers some switch types, will need extending for more
if entry['DevCat'] == 2 and entry['SubCat'] in {53, 54, 55, 56, 57}:
entry['account'] = self.key.string_id()
events.append({
'device_type': 'insteon_switch',
'device_id': 'insteon-%s' % entry['InsteonID'],
'event': entry,
})
device.process_events(events)
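# Hedged flow note: _get_refresh_data above covers both halves of the OAuth2
# dance; it exchanges the one-time auth_code for tokens on first use
# (grant_type=authorization_code) and thereafter swaps the stored refresh_token
# for a fresh access token (grant_type=refresh_token).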
|
__author__ = 'adam'
import unittest
from ErrorClasses import *
class TweetErrorTest(unittest.TestCase):
def setUp(self):
self.object = TweetError()
def tearDown(self):
pass
class TweetServiceErrorTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
class HashtagServiceErrorTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
class UserServiceErrorTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
import time
import unittest
import cryptocompare
import datetime
import calendar
import os
class TestCryptoCompare(unittest.TestCase):
def assertCoinAndCurrInPrice(self, coin, curr, price):
if isinstance(coin, list):
for co in coin:
self.assertCoinAndCurrInPrice(co, curr, price)
return
else:
self.assertIn(coin, price)
if isinstance(curr, list):
for cu in curr:
self.assertIn(cu, price[coin])
else:
self.assertIn(curr, price[coin])
def test_coin_list(self):
lst = cryptocompare.get_coin_list()
self.assertTrue('BTC' in lst.keys())
lst = cryptocompare.get_coin_list(True)
self.assertTrue('BTC' in lst)
def test_get_price(self):
coin = 'BTC'
price = cryptocompare.get_price(coin)
self.assertCoinAndCurrInPrice(coin, 'EUR', price)
price = cryptocompare.get_price(coin, currency='USD')
self.assertCoinAndCurrInPrice(coin, 'USD', price)
currencies = ['EUR', 'USD', 'GBP']
price = cryptocompare.get_price(coin, currency=currencies)
self.assertCoinAndCurrInPrice(coin, currencies, price)
coins = ['BTC', 'XMR']
price = cryptocompare.get_price(coins, currency=currencies)
self.assertCoinAndCurrInPrice(coins, currencies, price)
def test_get_price_full(self):
price = cryptocompare.get_price('ETH', full=True)
self.assertIn('RAW', price)
self.assertIn('ETH', price['RAW'])
self.assertIn('EUR', price['RAW']['ETH'])
self.assertIn('PRICE', price['RAW']['ETH']['EUR'])
def test_get_historical_price(self):
coin = 'XMR'
curr = 'EUR'
price = cryptocompare.get_historical_price(
'XMR', timestamp=datetime.date(2017, 6, 6))
self.assertCoinAndCurrInPrice(coin, curr, price)
price2 = cryptocompare.get_historical_price(
'XMR', 'EUR', datetime.datetime(2017, 6, 6))
self.assertCoinAndCurrInPrice(coin, curr, price2)
self.assertEqual(price, price2)
def test_price_day(self):
coin = 'BTC'
curr = 'USD'
price = cryptocompare.get_historical_price_day(
coin, currency=curr, limit=3, exchange='CCCAGG', toTs=datetime.datetime(2019, 6, 6))
for frame in price:
self.assertIn('time', frame)
def test_price_day_all(self):
coin = 'BTC'
curr = 'USD'
price = cryptocompare.get_historical_price_day_all(
coin, currency=curr, exchange='CCCAGG')
self.assertTrue(len(price) > 1)
for frame in price:
self.assertIn('time', frame)
def test_price_day_from(self):
coin = 'BTC'
curr = 'USD'
price = cryptocompare.get_historical_price_day_from(
            coin, currency=curr, exchange='CCCAGG',
            toTs=int(calendar.timegm(datetime.datetime(2019, 6, 6).timetuple())),
            fromTs=int(calendar.timegm(datetime.datetime(2019, 6, 4).timetuple())))
self.assertTrue(len(price) == 3)
for frame in price:
self.assertIn('time', frame)
def test_price_hour(self):
coin = 'BTC'
curr = 'USD'
price = cryptocompare.get_historical_price_hour(
coin, currency=curr, limit=3, exchange='CCCAGG', toTs=datetime.datetime(2019, 6, 6, 12))
for frame in price:
self.assertIn('time', frame)
def test_price_hour_from(self):
coin = 'BTC'
curr = 'USD'
        price = cryptocompare.get_historical_price_hour_from(
            coin, currency=curr, exchange='CCCAGG',
            toTs=int(calendar.timegm(datetime.datetime(2019, 6, 6, 3, 0, 0).timetuple())),
            fromTs=int(calendar.timegm(datetime.datetime(2019, 6, 6, 1, 0, 0).timetuple())))
self.assertTrue(len(price) == 3)
for frame in price:
self.assertIn('time', frame)
def test_price_minute(self):
coin = 'BTC'
curr = 'USD'
price = cryptocompare.get_historical_price_minute(
coin, currency=curr, limit=3, exchange='CCCAGG', toTs=datetime.datetime.now())
for frame in price:
self.assertIn('time', frame)
def test_get_avg(self):
coin = 'BTC'
curr = 'USD'
avg = cryptocompare.get_avg(coin, curr, exchange='Kraken')
self.assertEqual(avg['LASTMARKET'], 'Kraken')
self.assertEqual(avg['FROMSYMBOL'], coin)
self.assertEqual(avg['TOSYMBOL'], curr)
def test_get_exchanges(self):
exchanges = cryptocompare.get_exchanges()
self.assertIn('Kraken', exchanges)
def test_get_pairs(self):
pairs = cryptocompare.get_pairs(exchange='Kraken')
self.assertEqual('Kraken', pairs[0]['exchange'])
def test_sets_api_key_using_environment_variable(self):
os.environ["CRYPTOCOMPARE_API_KEY"] = "Key"
api_key_parameter = cryptocompare.cryptocompare._set_api_key_parameter(
None)
assert api_key_parameter == "&api_key=Key"
def test_sets_api_key_with_no_env_var_and_none_passed(self):
if os.getenv("CRYPTOCOMPARE_API_KEY"):
del os.environ['CRYPTOCOMPARE_API_KEY']
api_key_parameter = cryptocompare.cryptocompare._set_api_key_parameter(
None)
assert api_key_parameter == ""
def test_sets_api_key_passed_in_works(self):
api_key_parameter = cryptocompare.cryptocompare._set_api_key_parameter(
"keytest")
assert api_key_parameter == "&api_key=keytest"
if __name__ == "__main__":
unittest.main()
|
"""If you run a Python socket server on a specific port and try to rerun it after closing it once, you
won't be able to use the same port.The remedy to this problem is to enable the socket reuse option, SO_REUSEADDR ."""
import socket
def reuse_socket_addr():
sock = socket.socket()
# get the old state of the SO_REUSEADDR option
old_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
print("Old sock state : {}".format(old_state))
# enable SO_REUSEADDR option
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
new_state = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
print("New sock state : {}".format(new_state))
local_port = 8282
# create socket object srv for server
srv = socket.socket()
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind socket
srv.bind(('', local_port))
# make server listen for 1 incoming connection
srv.listen(1)
print("Listening on port : {}".format(local_port))
while True:
try:
connection, addr = srv.accept()
print(connection)
print(addr)
print('Connected by {}:{}'.format(addr[0], addr[1]))
except KeyboardInterrupt:
break
except socket.error as msg:
print(msg)
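def example_client(host='127.0.0.1', port=8282):
    """Hedged client-side sketch (not in the original recipe): connect to the
    server above once and close, e.g. to verify the port accepts connections
    again immediately after a server restart."""
    conn = socket.create_connection((host, port))
    conn.close()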
if __name__ == '__main__':
reuse_socket_addr()
|
DEBUG = True
DEBUG_TB_INTERCEPT_REDIRECTS = False
SECRET_KEY = "\xdb\xf1\xf6\x14\x88\xd4i\xda\xbc/E'4\x7f`iz\x98r\xb9s\x1c\xca\xcd"
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://datamart:datamart@localhost/datamart'
MAIL_SERVER = 'localhost'
MAIL_PORT = 25
SECURITY_PASSWORD_HASH = 'plaintext'
UPLOADS_DEFAULT_DEST = '/Users/mscully/contracts/predict/datamart/uploads'
|
"""
Newrelic LVM Plugin
-------------
Plugin to monitor LVM Disk space left on NewRelic
"""
from setuptools import setup
setup(
name='nr_lvm_plugin',
version='0.1.6',
url='https://github.com/WebGeoServices/newrelic_lvm_plugin',
license='MIT',
author='WebGeoServices',
author_email='operation@webgeoservices.com',
description='Plugin to monitor LVM Disk space left on NewRelic',
long_description=__doc__,
scripts = ["nrlvmd.py"],
install_requires=[
'daemonize==2.4.7',
'requests==2.13.0'
],
download_url = 'https://github.com/WebGeoServices/newrelic_lvm_plugin/releases/tag/0.1.6',
keywords = ['newrelic', 'LVM', 'Thinpool'],
classifiers = [],
)
|
"""
"""
from __future__ import unicode_literals, absolute_import
import tempfile
import unittest
from os.path import join, exists
from autobit import manage, config
from autobit.db import Release
from autobit.tracker import totv
from autobit.classification import MediaType
class TestManage(unittest.TestCase):
def test_write_to_watch(self):
fname = "test_file.torrent"
torrent_data = b"torrent_file_data"
watch_dir = tempfile.mkdtemp()
config["WATCH_DIR"] = watch_dir
manage.write_to_watch(torrent_data, fname)
out_file = join(watch_dir, fname)
self.assertTrue(exists(out_file))
self.assertEqual(torrent_data, open(out_file, "rb").read())
def test_process_release(self):
tkr = totv.TitansOfTV()
release = Release("Orange.Is.the.New.Black.S03E12.PROPER.720p.WEBRip.x264-2HD", 17856,
MediaType.TV, tkr)
manage.process_release(release)
if __name__ == '__main__':
unittest.main()
|
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('marks', '0002_auto_20150415_2348'),
]
operations = [
migrations.CreateModel(
name='Suspension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255, verbose_name='beskrivelse')),
('active', models.BooleanField(default=False)),
('added_date', models.DateTimeField(auto_now=True)),
('expiration_date', models.DateField(verbose_name='utl\xf8psdato', null=True, editable=False, blank=True)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
},
bases=(models.Model,),
),
]
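# Usage note: Django applies this migration via the migrate management
# command, e.g. `python manage.py migrate marks` (assuming the app label
# for this migration is `marks`, as its dependencies suggest).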
|
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomIPPrefixesOperations:
"""CustomIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified custom IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the CustomIpPrefix.
:type custom_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
custom_ip_prefix_name=custom_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def get(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.CustomIpPrefix":
"""Gets the specified custom IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomIpPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.CustomIpPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.CustomIpPrefix",
**kwargs: Any
) -> "_models.CustomIpPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CustomIpPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.CustomIpPrefix",
**kwargs: Any
) -> AsyncLROPoller["_models.CustomIpPrefix"]:
"""Creates or updates a custom IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update custom IP prefix operation.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.CustomIpPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomIpPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.CustomIpPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
custom_ip_prefix_name=custom_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
custom_ip_prefix_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.CustomIpPrefix":
"""Updates custom IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param custom_ip_prefix_name: The name of the custom IP prefix.
:type custom_ip_prefix_name: str
:param parameters: Parameters supplied to update custom IP prefix tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomIpPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.CustomIpPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'customIpPrefixName': self._serialize.url("custom_ip_prefix_name", custom_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CustomIpPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes/{customIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.CustomIpPrefixListResult"]:
"""Gets all the custom IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomIpPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.CustomIpPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomIpPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/customIpPrefixes'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.CustomIpPrefixListResult"]:
"""Gets all custom IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomIpPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.CustomIpPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CustomIpPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CustomIpPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/customIpPrefixes'} # type: ignore
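# Hedged usage sketch (assumptions: azure-identity is installed and the
# aio NetworkManagementClient exposes this operation group as the
# `custom_ip_prefixes` attribute):
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async def delete_prefix():
#       async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           poller = await client.custom_ip_prefixes.begin_delete("my-rg", "my-prefix")
#           await poller.result()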
|
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryImageName": _SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryImageName": _SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryImageName": _SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryImageName": _SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_gallery_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class GalleryImagesOperations(object):
"""GalleryImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs: Any
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_image, 'GalleryImage')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs: Any
) -> LROPoller["_models.GalleryImage"]:
"""Create or update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be created.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be created or updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the create or update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2021_07_01.models.GalleryImage
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_image, 'GalleryImageUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> LROPoller["_models.GalleryImage"]:
"""Update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be updated.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be updated. The allowed
characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The
maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2021_07_01.models.GalleryImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> "_models.GalleryImage":
"""Retrieves information about a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are
to be retrieved.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be retrieved.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.GalleryImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a gallery image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be deleted.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be deleted.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> Iterable["_models.GalleryImageList"]:
"""List gallery image definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to
be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryImageList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.GalleryImageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryImageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'} # type: ignore
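# Hedged usage sketch (assumptions: azure-identity is installed and
# ComputeManagementClient exposes this operation group as the
# `gallery_images` attribute):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute import ComputeManagementClient
#
#   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   image = client.gallery_images.get("my-rg", "my-gallery", "my-image-def")
#   for img in client.gallery_images.list_by_gallery("my-rg", "my-gallery"):
#       print(img.name)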
|
import wx
import pyHook
import pythoncom
import time
import win32api
import win32con
import threading
import pickle
from wx.lib.embeddedimage import PyEmbeddedImage
from wx.lib.wordwrap import wordwrap
figurefree = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAA3NCSVQICAjb4U/gAAAC0UlE"
"QVRIib2WPSxrYRjHf62qhJS2yiA+alBJDQY2kZgMEpsIiTAhjdEmBlPHLtI2JhIJNotVIjr6"
"FpNIK0iapsRXqGpxh8f73oN7ucnN6Tuc9Jw8p8/ze5/n/f+P5fn5GbBYLMDLywtQUlICrKys"
"AEtLS0BxcTHQ1tYGTE1NAa+vr4DVauWn9XPEfy5bOp0GioqKUARVVVVALBYDZmdngYaGBmBs"
"bAw4ODgAWltb+TcO8wlqamr0jRAIjXAkEgkUQUtLC3B6eooieHt7++ZaKALJJlNkzOz1ejVB"
"V1cXcHd3B5SVlekY47ty/brMJ/hb5tvbW6C2tlY/SSaTQHNzs34i3ZI5PD4+BuLxOJBKpYD7"
"+3sKQfDhxmZDTff29jYwMjKC6sTT05OubnFxETVR0onq6mqgrq4O8Pl8gMvlKiCBKJLdbgci"
"kQjQ1NQEOJ1OIBwOA4eHh8D6+jrQ3t4OjI+PA5WVld8kMJ9Adlxql3nY3NwE5ufndZDf70fR"
"TExMABcXF8DDwwOqB7Lj+XweeHx8BK6urgpBYJH8R0dHKO3s7OwEhoaGdNDZ2RmqNx6PR9PI"
"qZZ6d3Z2UOdfOHK5XCEIbJI5GAyinKunp+dTkFQkJ7m3txfo6OjQNcpbu7u7wMDAAFBfX4/q"
"jfkEk5OTKLcSlS8tLf0UtL+/D/T39wOrq6t8VKq9vT3U+TfW/u53phNMT0+jJlo8S3ZWlihi"
"d3c3sLa2BgQCARRlNpvVV6PKijOKsplPIFUvLy/r2jOZDGo2pFKjS4sWjY6OolxM4t1ut/5T"
"o8eYTzAzMwOcnJwAjY2NqB3s6+vjo0vLb4fDASwsLOhKhSkUCv2u2vClZD7B3NwcyptEiy4v"
"L1HzMzw8rEOlHxUVFcDg4CBwfX0NbGxsAOXl5X9MYL6aynl71w3D3kWjUWBrawul9Tc3N8D5"
"+TnKgeU8i29Lz75+rZpO8AsCJE578O/FcgAAAABJRU5ErkJggg==")
TheCrackOfDawn = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAA3NCSVQICAjb4U/gAAAGAElE"
"QVRIiXVWu44lWRGMiMw8VXX7NQ8WBxuPT0DiQ8HgE/YzkLAwQLhYOGjZnZmd7nurTgZGdQ8S"
"EscqnarKjIxIZQaR38EEQAY4YJJJqF2gibQOS4DAAgAQEGkz5Av8einSJMQGBbhN7d1I6oEM"
"ihMMrQ0BCWTwOknEIm9uS3FIsMgk2TA5GqRIpM2GQFASAAAGOGmmcpUF0ZSRVKDLzM4FyEZC"
"SQEMEGQCRQ7B4BADMEkyGrIIkqYB0vCglc0Lg9iHBiaDAMZiBBDmIEkvEM1gkqa42INUI6wB"
"tpBkSQTY3VHZ4ARskkjEpU2Oi3WYYSVYUMlJFCRjUEklKUNmkQNGaFCbQTLgNI2UpG6YIFSR"
"AJJxgTG1gUhtRjSLqhbE1QC1SnVMKAY4mNEIScaAygCigARMSaGGaWZENwCk4x5KYg0JWB1J"
"LnBkDrsUIQyIoxanyAJDDMYZXSZVOW1CIYGn7AApAkYynxxBbBMWLqphFpGqDRgmlWWSMVoF"
"igoTjLKGEAQhhU6FAQlBvqYggLQ+MMteWWGujnRkxnAW8ComYiDpXEkiJNIgY1BxxhGoYgOQ"
"KBAwEBEAUus7aGSsjtValGVRIWSBI3MxgSgpZoFGRlLRoQ5RggEFIKeiTOUOpwKcBkgmLx+t"
"wViQ1QrkkhJCXWUlYwQxI8jIFCVKorkExUhBJNUkk0wDWhNiKINtKdLbh6jFoMbiCEYxwqmK"
"ZCp0QkuSGkmKGZnqM9k4x4pIMRAFUipbUnZbIrPu7xRDDOYYY0DkEnOUQipV0rQrsiJHSkJF"
"rpOZDLLC8So7yixKWqIpUtNM48h4vEMwM5UDlRzRCytHBliZS0YJQ5XkwkhpyajJzBSjWsUU"
"bexSVSzEqlyDSQvMrHxYGsvCdXAZymSF1tBCj4jKWhQVHFhHVLFWpLhVhvIyVPSoTHHN+IKp"
"0IfUfeASWoNlh47Mba9q140Zy7rkOupSy0oOjDGS1tB2qaX0NMaS+/t1fSgtg4vnmszFq7iJ"
"txxfbrdfrLobc0sP9UIuifwY/6KRTu09GsuBdc/Lc4zLcrlft8q7zm3PZZd6vF/z/uXzHXQB"
"L4FLjOi5UoOFmT002e++7is7o9XHvN1y+/q3Grk4Hu+2uzGW5NNaTxWX/TY++emybQcuNxVV"
"n3aqkxqVFR7aY3/p5x+fjy8/HftA9jyu6M93tSWjGJelljXf//svygywj3l1O3r2fA49POjj"
"ZXt897A9rk/3l7v7u7zT08eH+8tlLD22qu2CbeOx4Xqb1+v8+uOxv+ja0j73G6L57J9/Rv72"
"Nx8/fPf07vHu8bJsI7YlthGrVHm7LLmXE/P4+nmM41p7xPWFvOkINz89zx+vtRjHz/N2Sz4r"
"bp24XV/i5fY4fvmPr49//MNf89c//P3pNh4+r3fva7lkrMtYq0bVirIenAzmJsyug9B03qA5"
"+3ZDcwI/3J6f9+N57vvt5eW6X+c/v+j7P/30++///DzF5Z6/+9V3to/9ZVTM40hqVH3dbz99"
"3pm8drcg+CHrOv2yz8P8dLv19BU4IKEJATkxD8C0ssBV3A+XsVPjYjhIHC3SgAFJuyRPzQNK"
"gQSmb22SgTbwOpAbbhgEYDFsBdWchbwZAAKRpwcA7LcnAM0Dplq2u9s2HAS/fQAANCUgiADU"
"p+fIgnL2BCgp2UcDBOjXOX7+yumGBZ15GzZNBtsmAdomAh2mAfAceqS7mQkes10RPNfC/znq"
"+cYFOdNE2HqDwLf90gBsixlRc7btCAF9HEf+v9j2hMA4Pd0rRPtbaODcWcZJLBFzzvOyu2fv"
"bgKIb1bsfw4ZYFgBklKfKioiwrbtiLC7PeGwDXDb1tk7TAN2G03ipKhPQG343LESHYAatvqU"
"y6bPEv4rNElmFgC0pxsCJija7nlIwYhoNyVAfTo7EsDJNUkS9ongW60USSqzjuPobgJm8+zy"
"15Y7GwyMCCIMQjJhv1Lcb+EInMAlvZGTZ9pTG3f3KwK4G3x1wN1NMgGedcuwQQOw+crktxxn"
"YSRJzbm/yvz2nqJBHPPVxMMkBYT9H3h01iCgfKKmAAAAAElFTkSuQmCC")
try:
    GLOBAL_SETTINGS = pickle.load(open('config', 'rb'))
except Exception:
GLOBAL_SETTINGS = {'leftclick':59, 'rightclick':60, 'hold':True}
ENTRIES = []
DOWNKEYS = set()
def OnHook(event):
scan_code = event.ScanCode
if scan_code == GLOBAL_SETTINGS['leftclick']:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(0.05)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
return False
elif scan_code == GLOBAL_SETTINGS['rightclick']:
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0)
time.sleep(0.05)
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)
return False
elif scan_code == 55:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
return False
return True
def OnKeyUp(event):
code = event.ScanCode
if code in DOWNKEYS:
if code == GLOBAL_SETTINGS['leftclick']:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
elif code == GLOBAL_SETTINGS['rightclick']:
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)
DOWNKEYS.remove(code)
return False
return True
def OnKeyDown(event):
code = event.ScanCode
if code == GLOBAL_SETTINGS['leftclick'] :
#LOCK.acquire()
if not code in DOWNKEYS:
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
DOWNKEYS.add(code)
#LOCK.release()
return False
elif code == GLOBAL_SETTINGS['rightclick'] :
#LOCK.acquire()
if not code in DOWNKEYS:
win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0)
DOWNKEYS.add(code)
#LOCK.release()
return False
return True
def OnSet(event):
scan_code = event.ScanCode
entry = ENTRIES.pop()
entry.SetValue(str(scan_code))
entry.SetBackgroundColour('#f0f0f0')
entry.Refresh()
ENTRIES.insert(0, entry)
ENTRIES[-1].SetBackgroundColour('green')
ENTRIES[-1].Refresh()
return False
def hook(callback):
hm = pyHook.HookManager()
if isinstance(callback, list):
hm.KeyDown = callback[0]
hm.KeyUp = callback[1]
else:
hm.KeyDown = callback
hm.HookKeyboard()
pythoncom.PumpMessages()
hm.UnhookKeyboard()
class TaskBarIcon(wx.TaskBarIcon):
ID_About = wx.NewId()
ID_Exit = wx.NewId()
ID_Stop = wx.NewId()
ID_Start = wx.NewId()
ID_Setting = wx.NewId()
def __init__(self):
wx.TaskBarIcon.__init__(self)
        self.SetIcon(figurefree.getIcon(), 'figure freer')
self.Bind(wx.EVT_MENU, self.OnAbout, id=self.ID_About)
self.Bind(wx.EVT_MENU, self.OnExit, id=self.ID_Exit)
self.Bind(wx.EVT_MENU, self.OnStop, id=self.ID_Stop)
self.Bind(wx.EVT_MENU, self.OnStart, id=self.ID_Start)
self.Bind(wx.EVT_MENU, self.OnSetting, id=self.ID_Setting)
self.OnStart(None)
def OnStop(self, event):
win32api.PostThreadMessage(self.hook.ident, 0x12)
def OnStart(self, event):
if GLOBAL_SETTINGS['hold']:
self.hook = threading.Thread(target=hook, args=([OnKeyDown, OnKeyUp],))
else:
self.hook = threading.Thread(target=hook, args=(OnHook,))
self.hook.daemon = True
self.hook.start()
def OnAbout(self, event):
# First we create and fill the info object
painter = wx.Frame(None)
info = wx.AboutDialogInfo()
info.SetIcon(TheCrackOfDawn.getIcon())
info.Name = "figure freer"
info.Version = "1.0"
info.Copyright = "(c) cd"
info.Description = wordwrap(
"you can perform the mouse clicking action by any key you want."
"For now, it meets my need perfectly. So no more time will be paid."
"If you have any need, you are free to change or ever rewrite it.",
350, wx.ClientDC(painter))
info.WebSite = ("https://github.com/thecrackofdawn/figure-freer", "figure freer's source code")
info.Developers = ["weibo : TheCrackOfDawn",
"hoping more and more attention ^_^"]
licenseText = "MIT. You are free to do whatever you want."
info.License = wordwrap(licenseText, 500, wx.ClientDC(painter))
# Then we call wx.AboutBox giving it that info object
wx.AboutBox(info)
painter.Destroy()
def OnSetting(self, event):
alive = False
if self.hook.is_alive():
alive = True
self.OnStop(None)
dialog = SettingDialog()
result = dialog.ShowModal()
if result == wx.ID_OK:
GLOBAL_SETTINGS['leftclick'] = int(dialog.leftClick.GetValue())
GLOBAL_SETTINGS['rightclick'] = int(dialog.rightClick.GetValue())
            pickle.dump(GLOBAL_SETTINGS, open('config', 'wb'))
dialog.Stop()
dialog.Destroy()
if alive:
self.OnStart(None)
def OnExit(self, event):
self.RemoveIcon()
self.Destroy()
def CreatePopupMenu(self):
menu = wx.Menu()
menu.Append(self.ID_About, 'about')
if self.hook.is_alive():
menu.Append(self.ID_Stop, 'stop')
else:
menu.Append(self.ID_Start, 'start')
menu.Append(self.ID_Setting, 'settings')
menu.Append(self.ID_Exit, 'exit')
return menu
class SettingDialog(wx.Dialog):
def __init__(self):
global ENTRIES
wx.Dialog.__init__(self, None, -1, 'settings', size=(250, 200))
panel = wx.Panel(self)
wx.StaticText(panel, -1, 'left click:', pos=(20, 30))
self.leftClick = wx.TextCtrl(panel, -1, pos=(100, 27), value=str(GLOBAL_SETTINGS['leftclick']), style=wx.TE_READONLY)
wx.StaticText(panel, -1, 'right click:', pos=(20, 70))
self.rightClick = wx.TextCtrl(panel, -1, pos=(100, 67), value=str(GLOBAL_SETTINGS['rightclick']), style=wx.TE_READONLY)
self.hold = wx.CheckBox(panel, -1, 'simulate holding', pos=(60, 110))
self.hold.SetValue(GLOBAL_SETTINGS['hold'])
self.hold.Bind(wx.EVT_CHECKBOX, self.OnCheckBox)
okButton = wx.Button(panel, wx.ID_OK, 'OK', pos=(20, 140))
okButton.SetDefault()
cancelButton = wx.Button(panel, wx.ID_CANCEL, 'cancel', pos=(140, 140))
ENTRIES = [self.rightClick, self.leftClick]
ENTRIES[-1].SetBackgroundColour('green')
self.Start()
def OnCheckBox(self, event):
GLOBAL_SETTINGS['hold'] = self.hold.GetValue()
def Stop(self):
win32api.PostThreadMessage(self.hook.ident, 0x12)
def Start(self):
self.hook = threading.Thread(target=hook, args=(OnSet,))
self.hook.daemon = True
self.hook.start()
def run():
app = wx.App()
taskBar = TaskBarIcon()
app.MainLoop()
if __name__ == '__main__':
run()
|
import sys
import os
import re
import argparse
import fileinput
import copy
import itertools
from argparse import RawDescriptionHelpFormatter
from patterns import main_patterns
BUFFER_SIZE = 30
class Buffer(object):
"""Class for buffering input"""
def __init__(self, output_filename=os.devnull):
self.buf = [''] * BUFFER_SIZE
self.line_scrolled = 0
self.matched_lines = []
self.unmatched_stream = open(output_filename, 'w')
self.updated = True
self.last_text = ''
def add(self, line):
self.buf.append(line)
if 0 not in self.matched_lines:
self.unmatched_stream.write(line)
self.buf = self.buf[1:BUFFER_SIZE+1]
self.matched_lines = filter(lambda x: x >= 0,
map(lambda x: x - 1, self.matched_lines))
self.line_scrolled += 1
self.updated = True
@property
def text(self):
"""lazy function, returning joined lines
:returns: str()
"""
if self.updated:
self.last_text = ''.join(self.buf)
self.updated = False
return self.last_text
def try_to_match(self, pattern):
matched = pattern.search(self.text)
if matched:
self.mark_matched(pattern)
return matched
    def mark_matched(self, pattern):
        # `pattern` is a compiled regex object; count the lines in its
        # source string, not the result of splitting with the pattern
        lines_count = len(pattern.pattern.split('\n'))
        marked_lines = range(BUFFER_SIZE - lines_count, BUFFER_SIZE)
        self.matched_lines += marked_lines
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for _ in xrange(BUFFER_SIZE):
self.add('')
self.unmatched_stream.close()
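
# Note: Buffer keeps a sliding window of the last BUFFER_SIZE input lines so
# multi-line patterns can match against the joined text; on exit it scrolls
# BUFFER_SIZE blank lines through the window to drain the matched-line
# bookkeeping before closing the unmatched-output stream.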
class SnippetsQueue(object):
def __init__(self, snippets_count):
self.SNIPPETS_TO_SHOW = snippets_count
self.new_snippets = []
self.ready_snippets = {}
self.pattern_used = {}
def push(self, snippet):
pattern = snippet.pattern
if not self.pattern_used.get(pattern, False):
self.new_snippets.append(snippet)
self.pattern_used[pattern] = True
def add(self, line):
for snippet in self.new_snippets:
if snippet.full():
self.make_ready(snippet)
else:
snippet.add(line)
def make_ready(self, snippet):
self.new_snippets.remove(snippet)
pattern = snippet.pattern
try:
self.ready_snippets[pattern].append(snippet)
except KeyError:
self.ready_snippets[pattern] = [snippet]
self.pattern_used[pattern] = False
if len(self.ready_snippets[pattern]) == self.SNIPPETS_TO_SHOW:
self.pattern_used[pattern] = True
def make_all_ready(self):
for snippet in self.new_snippets:
self.make_ready(snippet)
class StatCollector(object):
"""Class for collecting statistics"""
def __init__(self):
self.match_count = {}
self.lines_count = 0
self.number_of_matched_lines = 0
def add(self, pattern):
"""Add pattern matching event"""
if pattern not in self.match_count:
self.match_count[pattern] = 0
self.match_count[pattern] += 1
self.number_of_matched_lines += len(pattern.split('\n'))
def print_stat(stat_collector, snippets_queue=None, **kwargs):
"""Print statistics and snippets"""
output_stream = sys.stdout
for pattern, count in stat_collector.match_count.iteritems():
output_stream.write("""\
********************************************************************************
pattern: "{}"
--------------------------------------------------------------------------------
number of matches: {}
********************************************************************************\n\
""".format(pattern, count))
if snippets_queue:
if pattern in snippets_queue.ready_snippets:
for snippet in snippets_queue.ready_snippets[pattern]:
snippet.show()
output_stream.write('-' * 80 + '\n')
else:
output_stream.write('|No snippets found : (\n')
output_stream.write('-' * 80 + '\n')
output_stream.write('patterns used: {}/{}\n'.format(
len(stat_collector.match_count),
len(kwargs['patterns'])))
output_stream.write('number of lines matched: {}/{}\n'.format(
stat_collector.number_of_matched_lines,
stat_collector.lines_count))
def make_escaped(string):
"""Make escaped pattern from string
:string: string to be escaped
:returns: pattern
"""
return re.escape(string.replace('\n', 'NSLPH')).replace('NSLPH', '\n') + r'\n\Z'
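
# Worked example: make_escaped('ab.c\nd') first protects the newline with the
# NSLPH placeholder, regex-escapes the rest ('ab\.c'), restores the newline,
# and appends r'\n\Z' so the resulting pattern anchors at the end of the buffer.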
def get_stat(input_files, snippets_count=0, context=3,
patterns=main_patterns,
output_stream=sys.stdout, snippets_file=None,
unmatched_filename=os.devnull):
"""Show statistics for patterns
:input_stream: input stream
:snippets_count: maximum number of snippets to show for every pattern
:context: number of lines in snippet around last line of pattern match
:patterns: patterns that need to look
:output_stream: stream for output
:returns: None
"""
input_stream = fileinput.input(input_files)
LINES_ABOVE = context
LINES_BELOW = context
SHOW_SNIPPETS = snippets_count > 0
class Snippet(object):
def __init__(self, lines_above, pattern, line_number):
self.text = copy.copy(lines_above)
self.pattern = pattern
self.line_number = line_number
def full(self):
return len(self.text) >= LINES_ABOVE + LINES_BELOW + 1
def add(self, line):
self.text.append(line)
def show(self):
i = 0
for line in self.text:
if i == LINES_ABOVE:
output_stream.write('| {} ==>'.format(self.line_number))
else:
if re.search(self.pattern, line):
output_stream.write('|-->')
else:
output_stream.write('|>')
output_stream.write(line)
i += 1
stat_collector = StatCollector()
snippets_queue = SnippetsQueue(snippets_count)
line_number = 1
compiled_patterns = map(re.compile, patterns)
with Buffer(unmatched_filename) as input_buffer:
if snippets_file:
snippets_buffer = Buffer()
else:
snippets_buffer = input_buffer
for line in input_stream:
input_buffer.add(line)
if snippets_file:
line = snippets_file.readline()
snippets_buffer.add(line)
for pattern in compiled_patterns:
if input_buffer.try_to_match(pattern):
if SHOW_SNIPPETS:
                        if LINES_ABOVE > 0:
                            snippet_beginning = snippets_buffer.buf[-LINES_ABOVE:]
                        else:
                            snippet_beginning = []
                        snippet = Snippet(snippet_beginning, pattern.pattern,
                                          line_number)
snippets_queue.push(snippet)
stat_collector.add(pattern.pattern)
if SHOW_SNIPPETS:
snippets_queue.add(line)
line_number += 1
stat_collector.lines_count = line_number - 1
if not SHOW_SNIPPETS:
snippets_queue = None
else:
snippets_queue.make_all_ready()
return (stat_collector, snippets_queue)
def show_patterns():
"""Show patterns from patterns.py"""
for pattern in main_patterns:
print pattern
print '-' * 80
def print_escaped(files):
input_stream = fileinput.input(files)
text = ''
for line in input_stream:
text += line
escaped_text = make_escaped(text)
print '\n'
print escaped_text
def main():
parser = argparse.ArgumentParser(description=\
"""
Parse file[s]\n\n
examples:
./%(prog)s file[s] -s 2 -C 3
cat error_log | tail -n 1000 | ./prepare.py | ./%(prog)s
./%(prog)s -p
echo "some \\ntext, that wants to be pattern" | ./%(prog)s -e
""", formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('file', nargs='*', default=[],
help='file[s] to be parsed')
parser.add_argument('-s', '--snippets', nargs='?', type=int, const=5,
help='show maximum SNIPPETS snippets (5 default)')
parser.add_argument('-C', '--context', nargs='?', type=int,
help='show CONTEXT lines around pattern last line')
parser.add_argument('-p', '--patterns', action='store_const', const=True,
help='show patterns')
parser.add_argument('-e', '--escape', action='store_const', const=True,
help='escape given string')
parser.add_argument('-o', '--original', nargs=1,
help='provide original file for better snippets')
parser.add_argument('-u', '--unmatched', nargs=1,
help='output unmatched text to file')
args = parser.parse_args()
if args.patterns:
show_patterns()
return
if args.escape:
print_escaped(args.file)
return
kwargs = {}
kwargs['input_files'] = args.file
if args.snippets:
kwargs['snippets_count'] = args.snippets
if args.context is not None:
kwargs['context'] = args.context
if args.original:
kwargs['snippets_file'] = fileinput.input(args.original)
if args.unmatched is not None:
kwargs['unmatched_filename'] = args.unmatched[0]
kwargs['patterns'] = main_patterns
result = get_stat(**kwargs)
print_stat(*result, **kwargs)
if __name__ == '__main__':
main()
|
__doc__ = \
"""
Module to spawn modem connectivity
"""
__author__ = "Praneeth Bodduluri <lifeeth[at]gmail.com>"
import sys, os
path = os.path.join(request.folder, "modules")
if not path in sys.path:
sys.path.append(path)
import pygsm
import threading
import time
from pygsm.autogsmmodem import GsmModemNotFound
import s3msg
class ModemThread( threading.Thread ):
def __init__(self, modem):
self.modem = modem
        threading.Thread.__init__(self)
self.msg = s3msg.Msg(globals(), db, T, modem=modem)
def run(self):
boxdata = self.modem.query("AT+CMGD=?")
boxsize = int(boxdata.split("(")[1].split(")")[0].split("-")[1])
cleanup = False
while True:
self.msg.process_outbox(contact_method=2, option=2)
for i in range(5):
# parse 5 messages in one shot
message = self.modem.next_message()
if message is not None:
cleanup = True
# for debug purposes
#print "Got message: " + message.text
# Temp: SMS AutoResponder on by default
#self.modem.send_sms(message.sender,"This is to be replaced with the autorespond message")
                    # receive_msg lives on the s3msg helper (self.msg), not on the thread itself
                    self.msg.receive_msg(message=message.text, fromaddress=message.sender, pr_message_method=2)
# ^ dependent on the pr_message_method
if cleanup:
for i in range(boxsize): # For cleaning up read messages.
try:
temp = self.modem.command("AT+CMGR=" + str(i+1) + ",1")
if "REC READ" in temp[0]:
self.modem.query("AT+CMGD=" + str(i+1))
except:
pass
cleanup = False
time.sleep(5)
#self.modem.send_sms("9935648569", "Hey!")
modem_configs = db(db.msg_modem_settings.enabled == True).select()
modems = []
for modem in modem_configs:
# mode is set to text as PDU mode is flaky
modems.append(pygsm.GsmModem(port=modem.modem_port, baudrate=modem.modem_baud, mode="text"))
if len(modems) == 0:
# If no modem is found try autoconfiguring - We shouldn't do this anymore
#try:
# modems.append(pygsm.AutoGsmModem())
#except GsmModemNotFound, e:
# # No way yet to pass back the error yet
# pass
pass
else:
# Starting a thread for each modem we have
for modem in modems:
        ModemThread(modem).start()
|
import random
import requests
from flask import Flask, redirect, render_template
app = Flask(__name__, static_url_path='')
API_KEY = 'dc6zaTOxFJmzC'
@app.route('/')
def entry_point():
return render_template('index.html')
@app.route('/query/<path:query>')
def what(query):
# Query Giphy API if there is a query
response = requests.get(
'http://api.giphy.com/v1/gifs/search?q={}&api_key={}&limit=10'.format(
query, API_KEY
)
)
giphy_gifs = response.json()['data']
    # Show a random GIF from the results
    if giphy_gifs:
        return redirect(
            random.choice(giphy_gifs)['images']['original']['url'],
            code=302
        )
# Show a failure message
else:
return render_template(
'nogif.html', title='Awww, no GIF for {}'.format(query)
)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
import os
def _post_generate():
""" convert CRLF to LF line separator
Because cookcutter #405 bug
:param temp_work_dir:
:param output_project:
:return:
"""
post = ["ci/analysis.sh", "ci/deploy.sh", "ci/ut.sh", "ci/at.sh", "ci/it.sh", "ci/build.sh"]
for post_file_path in post:
if not os.path.exists(post_file_path):
continue
    lines = open(post_file_path, 'rb').readlines()
    # convert CRLF to LF; strip only the line ending so indentation survives
    with open(post_file_path, 'wb') as tmp:
        tmp.writelines([line.rstrip(b'\r\n') + b'\n' for line in lines])
_post_generate()
|
def f(e, p=[]):
p.append(e)
return p
lists = []
for idx in range(5):
lists.append(f(idx))
print lists
print set(id(l) for l in lists)
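
# The default list above is created once, at function definition time, so
# every call to f() without an explicit `p` mutates and returns the SAME
# list -- hence all five entries of `lists` share a single id. A minimal
# sketch of the usual fix (assuming the intent is a fresh list per call):
def f_fixed(e, p=None):
    if p is None:
        p = []
    p.append(e)
    return p

fixed_lists = [f_fixed(idx) for idx in range(5)]
print fixed_lists                      # [[0], [1], [2], [3], [4]]
print set(id(l) for l in fixed_lists)  # five distinct ids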
|
from Card import Card
class CardCollection:
# Initialise fields
def __init__(self):
self.cards = []
# Error checks
def __check__(self, card):
if not isinstance(card, Card):
raise ValueError("argument not of type: Card")
return card
# String representation
def __toString__(self, card):
return "%s%s" % (card.rank, card.suit)
# Other methods
    def contains(self, card):
        # cards are stored by their string form (see add), so compare strings
        return self.__toString__(self.__check__(card)) in self.cards
def isEmpty(self):
return len(self.cards) == 0
def size(self):
return len(self.cards)
def add(self, card):
if (self.contains(self.__check__(card))):
raise ValueError("card already present")
self.cards.append((self.__toString__(card)))
def sort(self):
self.cards = sorted(self.cards)
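
# Quick usage sketch (assumes Card exposes `rank` and `suit` attributes and a
# constructor like Card('A', 'S'); illustrative only):
#
#   hand = CardCollection()
#   hand.add(Card('A', 'S'))
#   hand.contains(Card('A', 'S'))  # -> True
#   hand.size()                    # -> 1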
|
def run(whatweb, pluginname):
whatweb.recog_from_content(pluginname, "OAapp.woa")
|
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import rcquerybuilder
|
""""
ProjectName: liv-api
Repo: https://github.com/chrisenytc/liv-api
Copyright (c) 2014 Christopher EnyTC
Licensed under the MIT license.
"""
from api import app
from flask import request
from flask import jsonify as JSON
from api.models.user import User
from cors import cors
from hashlib import sha1
from auth import require_auth
@app.route('/me')
@cors(origin='*')
@require_auth(roles='*')
def me_index():
request.user.pop('password', None)
request.user.pop('modules', None)
return JSON(request.user)
@app.route('/me', methods=['PUT'])
@cors(origin='*', methods=['PUT'])
@require_auth(roles='*')
def me_update():
data = request.get_json()
user = User.objects(email=request.user['email'])
    if data.get('name'):
        user.update(set__name=data['name'])
    if data.get('email'):
        user.update(set__email=data['email'])
    if data.get('password'):
        user.update(set__password=sha1(data['password']).hexdigest())
return JSON(message='Account updated successfully!')
@app.route('/me', methods=['DELETE'])
@cors(origin='*', methods=['DELETE'])
@require_auth(roles='*')
def me_delete():
user = User.objects(email=request.user['email'])
user.delete()
return JSON(message='Account deleted successfully!')
|
"""Registries used in package."""
import uuid
from .store import Store
class StoreRegistry(type):
"""
When added to a class, automatically adds instances of that class to the
central storage database.
"""
def __call__(cls, *args, **kwargs):
        # Import inside the call to avoid circular imports
        from .store import store
        instance = super(StoreRegistry, cls).__call__(*args, **kwargs)
# Assign the instance a unique identifier
instance._identifier = str(uuid.uuid4())
store.register(instance)
return instance
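
# Minimal usage sketch (assumption: `store.register` accepts any object; the
# Widget class below is hypothetical). In the Python 2 style used here the
# metaclass is attached via __metaclass__:
#
#   class Widget(object):
#       __metaclass__ = StoreRegistry
#
#   w = Widget()         # auto-registered with the store
#   print w._identifier  # uuid4 string assigned by the registry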
|
from twisted.cred import error
class BadPassword(error.UnauthorizedLogin):
pass
class NoSuchUser(error.UnauthorizedLogin):
pass
class ImaginaryError(Exception):
pass
class NoSuchCommand(ImaginaryError):
"""
There is no command like the one you tried to execute.
"""
class AmbiguousArgument(ImaginaryError):
"""
    One or more of the inputs specified cannot be narrowed down to
    just one thing. This can be due to the presence of multiple
things with similar names, or due to the absence of anything named
similarly to the given input.
@ivar action: The action which was being processed when an ambiguity was
found.
@type part: C{str}
@ivar part: The part of the command which was ambiguous.
Typically something like 'target' or 'tool'.
@type partValue: C{str}
@ivar partValue: The string which was supplied by the user for the indicated part.
@type objects: C{list} of C{IThing}
@ivar objects: The objects which were involved in the ambiguity.
"""
def __init__(self, action, part, partValue, objects):
ImaginaryError.__init__(self, action, part, partValue, objects)
self.action = action
self.part = part
self.partValue = partValue
self.objects = objects
class ActionFailure(ImaginaryError):
"""
Wrapper exception for an Event that caused an action to fail (such that the
transaction in which it was running should be reverted).
"""
def __init__(self, event):
ImaginaryError.__init__(self)
self.event = event
def __repr__(self):
return '<Action Failure: %r>' % (self.event,)
class ThingNotFound(ImaginaryError):
"""
Resolving a Thing by identity failed.
"""
class DoesntFit(ImaginaryError):
"""
An object tried to go into a container, but the container was full.
"""
class Closed(ImaginaryError):
"""
An object tried to go into a container, but the container was closed.
"""
class CannotMove(ImaginaryError):
"""
An object tried to move but it was not portable so it couldn't.
"""
|
from . import IntercomError
from .client import IntercomAPI
class User(object):
endpoint = "users"
attributes = (
"user_id",
"email",
"id",
"signed_up_at",
"name",
"last_seen_ip",
"custom_attributes",
"last_seen_user_agent",
"companies",
"last_request_at",
"unsubscribed_from_emails",
)
def _extract_companies(self, companies):
result = []
for comp in companies["companies"]:
result.append({"company_id": comp["company_id"], "name": comp["name"]})
return result
def __init__(self, **kwargs):
        for item in kwargs:
if item == "companies":
setattr(self, item, self._extract_companies(kwargs[item]))
continue
setattr(self, item, kwargs[item])
@classmethod
def _get_user_params(cls, user_id, email):
if not user_id and not email:
raise IntercomError("Must specify either a user_id or email.")
params = {}
if user_id is not None:
params["user_id"] = user_id
if email is not None:
params["email"] = email
return params
@classmethod
def create(cls, user_id=None, email=None, new_session=False, update_last_request_at=False, **kwargs):
data = cls._get_user_params(user_id, email)
data["update_last_request_at"] = update_last_request_at
data["new_session"] = new_session
data.update(kwargs)
return IntercomAPI.request("POST", cls.endpoint, data=data)
@classmethod
def update_last_seen(cls, user_id, last_request_at):
data = {"user_id": user_id, "last_request_at": last_request_at}
return IntercomAPI.request("POST", cls.endpoint, data=data)
@classmethod
def bulk_create_users(cls, users):
return IntercomAPI.request("POST", cls.endpoint, data={"users": users})
@classmethod
def list(cls, tag_id=None, segment_id=None):
params = {"page": 1}
if tag_id is not None:
params["tag_id"] = tag_id
if segment_id is not None:
params["segment_id"] = segment_id
data = IntercomAPI.request("GET", cls.endpoint, params=params)
params["page"] += 1
results = data["users"]
pages = int(data["pages"]["total_pages"])
while params["page"] <= pages:
data = IntercomAPI.request("GET", cls.endpoint, params=params)
results += data["users"]
params["page"] += 1
return [cls(**item) for item in results]
@classmethod
def get(cls, user_id=None, email=None):
params = cls._get_user_params(user_id, email)
data = IntercomAPI.request("GET", cls.endpoint, params=params)
return cls(**data)
@classmethod
def delete_user(cls, user_id=None, email=None):
params = cls._get_user_params(user_id, email)
return IntercomAPI.request("DELETE", cls.endpoint, params=params)
def save(self, update_last_request_at=False, new_session=False):
data = {"new_session": new_session, "update_last_request_at": update_last_request_at}
for attribute in self.attributes:
if hasattr(self, attribute) and getattr(self, attribute) is not None:
data[attribute] = getattr(self, attribute)
new_data = IntercomAPI.request("POST", self.endpoint, data=data)
self.__init__(**new_data)
def delete(self):
IntercomAPI.request("DELETE", self.endpoint, params={"user_id": self.user_id})
|
from petpvc import petpvc4DCommand
from pvc_template import *
file_format="NIFTI"
separate_labels=True
class pvcCommand(petpvc4DCommand):
_suffix='VC'
def check_options(pvcNode, opts):
    if opts.scanner_fwhm is not None:
        pvcNode.inputs.z_fwhm = opts.scanner_fwhm[0]
        pvcNode.inputs.y_fwhm = opts.scanner_fwhm[1]
        pvcNode.inputs.x_fwhm = opts.scanner_fwhm[2]
    if opts.max_iterations is not None:
        pvcNode.inputs.iterations = opts.max_iterations
    # note: opts.k (if given) also writes to `iterations`, overriding
    # max_iterations -- behaviour preserved from the original
    if opts.k is not None:
        pvcNode.inputs.iterations = opts.k
    return pvcNode
|
from cxio.cx_reader import CxReader
fi = open('example_data/example0.cx', 'r')
cx_reader = CxReader(fi)
print('pre meta data: ')
for e in cx_reader.get_pre_meta_data():
print(e)
print()
print()
for e in cx_reader.aspect_elements():
print(e)
print()
print()
print('post meta data:')
for e in cx_reader.get_post_meta_data():
print(e)
print()
print()
for name, count in cx_reader.get_aspect_element_counts().items():
print(name + ': ' + str(count))
print()
print()
print('OK')
|
import logging
from neuro import reshape
import neuro
import numpy
from reikna.algorithms import PureParallel
from reikna.core import Parameter
from reikna.core.signature import Annotation
log = logging.getLogger("classification")
def classification_delta_kernel(ctx, outputs, targets, deltas):
kernel_cache, thread = ctx.kernel_cache, ctx.thread
assert outputs.shape[0] == targets.shape[0] == deltas.shape[0]
assert len(targets.shape) == 1
assert targets.dtype == numpy.int32
assert outputs.shape[1] == deltas.shape[1]
key = (classification_delta_kernel, outputs.shape)
    if key not in kernel_cache:
log.info("compiling " + str(key))
kernel = PureParallel(
[
Parameter('outputs', Annotation(outputs, 'i')),
Parameter('targets', Annotation(targets, 'i')),
Parameter('deltas', Annotation(deltas, 'o'))
],
"""
${outputs.ctype} out = ${outputs.load_same};
SIZE_T t = ${targets.load_idx}(${idxs[0]});
SIZE_T idx = ${idxs[1]};
${deltas.ctype} d;
if (t == idx) {
d = 1.0f - out;
} else {
d = -out;
}
${deltas.store_same}(d);
""", guiding_array='deltas')
kernel_cache[key] = kernel.compile(thread)
# Run kernel
kernel_cache[key](outputs, targets, deltas)
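
# The kernel above computes, per pattern, delta_j = t_j - out_j where t is the
# one-hot encoding of the integer class label: 1 - out at the target index,
# -out everywhere else.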
def class_errors(ctx, expected, actual, errors):
""" expected int32, actual float, errors int32 """
kernel_cache, thread = ctx.kernel_cache, ctx.thread
key = (class_errors, expected.shape)
    if key not in kernel_cache:
# target should be an integer
logging.info("compiling " + str(key))
assert expected.shape == errors.shape # one neuron per class
assert expected.shape == (actual.shape[0],) # index of the class
assert actual.dtype == numpy.float32
assert expected.dtype == numpy.int32
assert errors.dtype == numpy.int32
kernel = PureParallel(
[
Parameter('expected', Annotation(expected, 'i')),
Parameter('actual', Annotation(actual, 'i')),
Parameter('errors', Annotation(errors, 'o'))
],
"""
            SIZE_T expected = ${expected.load_idx}(${idxs[0]});
float maximum=0.0f;
float value;
SIZE_T maxindex = 0;
SIZE_T tl = ${target_length};
// calculate argmax
for(SIZE_T j=0; j < tl; j++) {
value = ${actual.load_idx}(${idxs[0]}, j);
if (value > maximum) {
maximum = value;
maxindex = j;
}
}
// If the confidence is too low, return an error
if (maximum < (1.0f / ${target_length}.0f + 0.001f)) {
${errors.store_same}(1);
return;
};
// compare argmax
if (maxindex != expected) {
${errors.store_same}(1);
} else {
${errors.store_same}(0);
}
""", guiding_array='expected', render_kwds={'target_length' : numpy.int32(actual.shape[1])})
kernel_cache[key] = kernel.compile(thread)
kernel_cache[key](expected, actual, errors)
class ClassificationNetwork(object):
"""
Defines the ouput of a neural network to solve a regression task.
"""
def __init__(self, **kwargs):
super(ClassificationNetwork, self).__init__(**kwargs)
log.info("Classification constructor")
self.targets_dtype = numpy.int32
self.error_measure = "Classification Errors"
def create_state(self, num_patterns):
state = super(ClassificationNetwork, self).create_state(num_patterns)
ctx = self.context
shp = (num_patterns,)
state.classification_errors = ctx.thread.array(shp, dtype=numpy.int32)
return state
def get_target_shape(self):
# in a classification network, the target values are just the index of the correct class
return ()
def get_target_dtype(self):
return numpy.int32
def delta(self, network_state, targets):
"""
Classes must be coded as integers. Each integer is one class.
"""
super(ClassificationNetwork, self).delta(network_state, targets)
outputs = network_state.layers[-1].activations
deltas = network_state.layers[-1].deltas
classification_delta_kernel(self.context, outputs, targets, deltas)
def add_layer(self, LayerClass, **kwargs):
super(ClassificationNetwork, self).add_layer(LayerClass, **kwargs)
def error(self, inputs, targets, network_state):
"""
Calculate the classification error.
"""
self.propagate(network_state, inputs)
outputs = network_state.layers[-1].activations
class_errors(self.context, targets, outputs, network_state.classification_errors)
self.context.sum(network_state.classification_errors, network_state.error)
return network_state.error.get()
|
from urllib.request import urlopen
from xml.etree.ElementTree import parse
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)
for item in doc.iterfind('channel/item'):
title = item.findtext('title')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(date)
print(link)
print()
print("Program executed.")
|
"""
Django settings for REMBUGDESA project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '+y5rt^l+7kt_t=ns9o&(d#yiju0a9rg!+gur5%1k6mz6%ut)y_'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'rest_framework',
'MASTER',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'REMBUGDESA.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'REMBUGDESA.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'rembugdesa',
'USER': 'postgres',
'PASSWORD': 'wicdt',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from socialbeer.posts.models import Post
from socialregistration.models import TwitterProfile
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for post in Post.objects.all():
if post.tweeter_id:
twitter_id = post.tweeter_id
tp = TwitterProfile.objects.get(twitter_id=twitter_id)
post.author = tp.user
post.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beers.beer': {
'Meta': {'object_name': 'Beer'},
'brewery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Brewery']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.BeerType']", 'null': 'True', 'blank': 'True'})
},
'beers.beertype': {
'Meta': {'object_name': 'BeerType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'beers.brewery': {
'Meta': {'object_name': 'Brewery'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'posts.post': {
'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'beer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Beer']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tweeter_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tweeter_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tweeter_profile_image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['posts']
|
from pyembed.core import PyEmbed
from pyembed.jinja2 import Jinja2Renderer
import vcr
@vcr.use_cassette('pyembed/jinja2/test/fixtures/cassettes/embed_template.yml')
def test_should_embed_with_jinja2_template():
renderer = Jinja2Renderer('pyembed/jinja2/test/fixtures')
embedding = PyEmbed(renderer=renderer).embed(
'http://www.youtube.com/watch?v=qrO4YZeyl0I')
assert embedding == \
'Lady Gaga - Bad Romance by LadyGagaVEVO from ' + \
'http://www.youtube.com/watch?v=qrO4YZeyl0I'
|
"""
Elementary Math Library. (For demonstration purpose only)
"""
__version__ = "0.0.8"
__short_description__ = "Elementary Mathematics."
__license__ = "MIT"
__author__ = "Sanhe Hu"
__author_email__ = "husanhe@me.com"
__maintainer__ = "Sanhe Hu"
__maintainer_email__ = "husanhe@me.com"
__github_username__ = "MacHu-GWU"
|
from binder.permissions.views import PermissionView
from ..models.city import CityState, City, PermanentCity
class CityView(PermissionView):
model = City
class CityStateView(PermissionView):
model = CityState
class PermanentCityView(PermissionView):
model = PermanentCity
|
import re
import urllib2
def scrape_hall2(filename, start, end):
    """Scan OA roll numbers in [start, end) and record HALL2 residents."""
    data_file = open(filename, "w")
    for i in range(start, end):
        link = "http://oa.cc.iitk.ac.in:8181/Oa/Jsp/OAServices/IITk_SrchRes.jsp?typ=stud&numtxt=" + str(i) + "&sbm=Y"
        data_student = urllib2.urlopen(link).read()
        if data_student.find("HALL2") >= 0:
            name = re.search(r"<b>Name: <\/b>([\n\ \ ])([0-9a-zA-Z\ \ ]*)", data_student)
            name_str = str(i) + " " + name.group(2).strip() + "\n"
            data_file.write(name_str)
    data_file.close()

# same scrape for each batch; ranges preserved from the original script
scrape_hall2("y15.txt", 150000, 150999)
scrape_hall2("y14.txt", 14000, 14999)
scrape_hall2("y13.txt", 13000, 13999)
|
import unittest
from cellardoor.storage import Storage
class TestStorage(unittest.TestCase):
def test_abstract_storage(self):
storage = Storage()
with self.assertRaises(NotImplementedError):
storage.get(None)
with self.assertRaises(NotImplementedError):
storage.get_by_ids(None, None)
with self.assertRaises(NotImplementedError):
storage.get_by_id(None, None)
with self.assertRaises(NotImplementedError):
storage.create(None, None)
with self.assertRaises(NotImplementedError):
storage.update(None, None, None)
with self.assertRaises(NotImplementedError):
storage.delete(None, None)
with self.assertRaises(NotImplementedError):
storage.check_filter(None, None, None)
|
import sys, os
from winappdbg import *
'''
ar_buggery_auto.py, sebastian apelt, siberas, 2016
winappdbg script to simulate the write-0 vulnerability as described in my syscan360 slides
usage: ar_buggery_auto.py <path to acrord32.exe> <pdf> <isPropertySpecified-offset>
how to get the isPropertySpecified-offset:
use the XFAnalyze_func.py script or add acroform.pdb and do "? AcroForm!METHOD_node_isPropertySpecified - Acroform" in Windbg
'''
if len(sys.argv) < 4:
print "%s <path to acrord32.exe> <pdf> <isPropertySpecified-offset>" % sys.argv[0]
sys.exit(1)
acrord32 = sys.argv[1]
pdf = sys.argv[2]
bp_offset = int(sys.argv[3], 16)
if not os.path.exists(acrord32):
print "could not find acrord32.exe binary '%s'" % acrord32
sys.exit(1)
if not os.path.exists(pdf):
print "could not find target pdf '%s'" % pdf
sys.exit(1)
def bp_callback( event ):
process = event.get_process()
stackaddr = event.get_thread().get_sp()
# ".printf \"DBG: %ma\\r\\n\", poi(poi(esp+c)+10);"
address = process.read_pointer(process.read_pointer(stackaddr + 0x0c) + 0x10)
dbgstr = process.peek_string(address)
if dbgstr.startswith("write0:") == True:
targetaddr = int(dbgstr.split("write0:")[-1], 16)
print "[!] perform write0 to targetaddress 0x%x" % targetaddr
process.write_uint(targetaddr, 0)
else:
print dbgstr
bpset = False
def cbhandler(event):
global bpset
code = event.get_event_code()
if code == win32.EXCEPTION_DEBUG_EVENT:
name = event.get_exception_description()
code = event.get_exception_code()
try:
exc_address = event.get_fault_address()
except NotImplementedError:
exc_address = event.get_exception_address()
elif code == win32.LOAD_DLL_DEBUG_EVENT:
filename = event.get_filename()
if filename.lower().find("acroform") != -1 and bpset == False:
module = event.get_module()
global bp_offset
bp_address = module.get_base() + bp_offset
print "[i] set breakpoint @ 0x%x" % bp_address
event.debug.break_at( event.get_pid(), bp_address, bp_callback )
bpset = True
debug = Debug(cbhandler, bKillOnExit = True)
try:
debug.execv([acrord32, pdf], bFollow = True)
debug.loop()
finally:
debug.stop()
|
from ..models import Model
class Daq(Model):
def __init__(self):
super().__init__()
def initialize(self):
pass
def finalize(self):
pass
def add_task(self):
pass
def get_task(self):
pass
|
from setuptools import setup, find_packages
readme = open('README.md').read()
setup(name='SimpleRender',
version='0.1.2',
author='Nick Otter',
author_email='otternq@gmail.com',
url='https://github.com/otternq/SimpleRender',
license='MIT',
description='Takes a config file and a template and renders',
long_description=readme,
packages=find_packages(),
scripts=['bin/render.py'])
|
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
import os
import locale
import optparse
import textwrap
import ConfigParser
import sys
from difflib import SequenceMatcher
import logging
import sqlite3
import errno
import re
from beets import library
from beets import plugins
from beets import util
CONFIG_PATH_VAR = 'BEETSCONFIG'
DEFAULT_CONFIG_FILENAME_UNIX = '.beetsconfig'
DEFAULT_CONFIG_FILENAME_WINDOWS = 'beetsconfig.ini'
DEFAULT_LIBRARY_FILENAME_UNIX = '.beetsmusic.blb'
DEFAULT_LIBRARY_FILENAME_WINDOWS = 'beetsmusic.blb'
DEFAULT_DIRECTORY_NAME = 'Music'
WINDOWS_BASEDIR = os.environ.get('APPDATA') or '~'
PF_KEY_QUERIES = {
'comp': 'comp:true',
'singleton': 'singleton:true',
}
DEFAULT_PATH_FORMATS = [
(library.PF_KEY_DEFAULT, '$albumartist/$album/$track $title'),
(PF_KEY_QUERIES['singleton'], 'Non-Album/$artist/$title'),
(PF_KEY_QUERIES['comp'], 'Compilations/$album/$track $title'),
]
DEFAULT_ART_FILENAME = 'cover'
DEFAULT_TIMEOUT = 5.0
class UserError(Exception):
pass
def _encoding():
"""Tries to guess the encoding uses by the terminal."""
try:
return locale.getdefaultlocale()[1] or 'utf8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf8'
def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings.
"""
return [s.decode(_encoding()) for s in arglist]
def print_(*strings):
"""Like print, but rather than raising an error when a character
is not in the terminal's encoding's character set, just silently
replaces it.
"""
if strings:
if isinstance(strings[0], unicode):
txt = u' '.join(strings)
else:
txt = ' '.join(strings)
else:
txt = u''
if isinstance(txt, unicode):
txt = txt.encode(_encoding(), 'replace')
print txt
def input_options(options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, color=False, max_width=72):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. If `require` is
provided, then there is no default. `default` can be provided to
override this. The prompt and fallback prompt are also inferred but
can be overridden.
    If numrange is provided, it is a pair of `(low, high)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError('no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if (default is None and not numrange and first) \
or (isinstance(default, basestring) and
found_letter.lower() == default.lower()):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Possibly colorize the letter shortcut.
if color:
color = 'turquoise' if is_default else 'blue'
show_letter = colorize(color, show_letter)
# Insert the highlighted letter back into the word.
capitalized.append(
option[:index] + show_letter + option[index+1:]
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if default is None:
if require:
default = None
elif numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = str(default)
if color:
default_name = colorize('turquoise', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % str(default)))
else:
prompt_parts.append('# selection')
                prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
# Wrap the query text.
prompt = ''
line_length = 0
for i, (part, length) in enumerate(zip(prompt_parts,
prompt_part_lengths)):
# Add punctuation.
if i == len(prompt_parts) - 1:
part += '?'
else:
part += ','
length += 1
# Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width:
prompt += '\n'
line_length = 0
if line_length != 0:
# Not the beginning of the line; need a space.
part = ' ' + part
length += 1
prompt += part
line_length += length
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = 'Enter one of '
if numrange:
fallback_prompt += '%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'
# (raw_input(prompt) was causing problems with colors.)
print prompt,
resp = raw_input()
while True:
resp = resp.strip().lower()
# Try default option.
if default is not None and not resp:
resp = default
# Try an integer input if available.
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
# Try a normal letter input.
if resp:
resp = resp[0]
if resp in letters:
return resp
# Prompt for new input.
print fallback_prompt,
resp = raw_input()
def input_yn(prompt, require=False, color=False):
"""Prompts the user for a "yes" or "no" response. The default is
"yes" unless `require` is `True`, in which case there is no default.
"""
sel = input_options(
('y', 'n'), require, prompt, 'Enter Y or N:', color=color
)
return sel == 'y'
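
# Illustrative call: input_yn('Apply changes?') prints the prompt, defaults to
# 'y' on an empty response, and returns True only for a 'y' answer.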
def config_val(config, section, name, default, vtype=None):
"""Queries the configuration file for a value (given by the section
and name). If no value is present, returns default. vtype
optionally specifies the return type (although only ``bool`` and
``list`` are supported for now).
"""
if not config.has_section(section):
config.add_section(section)
try:
if vtype is bool:
return config.getboolean(section, name)
elif vtype is list:
# Whitespace-separated strings.
strval = config.get(section, name, True)
return strval.split()
else:
return config.get(section, name, True)
except ConfigParser.NoOptionError:
return default
def human_bytes(size):
"""Formats size, a number of bytes, in a human-readable way."""
suffices = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB']
for suffix in suffices:
if size < 1024:
return "%3.1f %s" % (size, suffix)
size /= 1024.0
return "big"
def human_seconds(interval):
"""Formats interval, a number of seconds, as a human-readable time
interval.
"""
units = [
(1, 'second'),
(60, 'minute'),
(60, 'hour'),
(24, 'day'),
(7, 'week'),
(52, 'year'),
(10, 'decade'),
]
for i in range(len(units)-1):
increment, suffix = units[i]
next_increment, _ = units[i+1]
interval /= float(increment)
if interval < next_increment:
break
else:
# Last unit.
increment, suffix = units[-1]
interval /= float(increment)
return "%3.1f %ss" % (interval, suffix)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue",
"purple", "teal", "lightgray"]
LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue",
"fuchsia", "turquoise", "white"]
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
def colorize(color, text):
"""Returns a string that prints the given text in the given color
in a terminal that is ANSI color-aware. The color must be something
in DARK_COLORS or LIGHT_COLORS.
"""
if color in DARK_COLORS:
escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30)
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30)
else:
        raise ValueError('no such color %s' % color)
return escape + text + RESET_COLOR
def colordiff(a, b, highlight='red'):
"""Given two values, return the same pair of strings except with
their differences highlighted in the specified color. Strings are
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
if not isinstance(a, basestring) or not isinstance(b, basestring):
# Non-strings: use ordinary equality.
a = unicode(a)
b = unicode(b)
if a == b:
return a, b
else:
return colorize(highlight, a), colorize(highlight, b)
a_out = []
b_out = []
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
if op == 'equal':
# In both strings.
a_out.append(a[a_start:a_end])
b_out.append(b[b_start:b_end])
elif op == 'insert':
# Right only.
b_out.append(colorize(highlight, b[b_start:b_end]))
elif op == 'delete':
# Left only.
a_out.append(colorize(highlight, a[a_start:a_end]))
elif op == 'replace':
# Right and left differ.
a_out.append(colorize(highlight, a[a_start:a_end]))
b_out.append(colorize(highlight, b[b_start:b_end]))
else:
            assert False
return u''.join(a_out), u''.join(b_out)
def default_paths(pathmod=None):
"""Produces the appropriate default config, library, and directory
paths for the current system. On Unix, this is always in ~. On
Windows, tries ~ first and then $APPDATA for the config and library
files (for backwards compatibility).
"""
pathmod = pathmod or os.path
windows = pathmod.__name__ == 'ntpath'
if windows:
windata = os.environ.get('APPDATA') or '~'
# Shorthand for joining paths.
def exp(*vals):
return pathmod.expanduser(pathmod.join(*vals))
config = exp('~', DEFAULT_CONFIG_FILENAME_UNIX)
if windows and not pathmod.exists(config):
config = exp(windata, DEFAULT_CONFIG_FILENAME_WINDOWS)
libpath = exp('~', DEFAULT_LIBRARY_FILENAME_UNIX)
if windows and not pathmod.exists(libpath):
libpath = exp(windata, DEFAULT_LIBRARY_FILENAME_WINDOWS)
libdir = exp('~', DEFAULT_DIRECTORY_NAME)
return config, libpath, libdir
def _get_replacements(config):
"""Given a ConfigParser, get the list of replacement pairs. If no
replacements are specified, returns None. Otherwise, returns a list
of (compiled regex, replacement string) pairs.
"""
repl_string = config_val(config, 'beets', 'replace', None)
if not repl_string:
return
parts = repl_string.strip().split()
if not parts:
return
if len(parts) % 2 != 0:
# Must have an even number of parts.
raise UserError(u'"replace" config value must consist of'
u' pattern/replacement pairs')
out = []
for index in xrange(0, len(parts), 2):
pattern = parts[index]
replacement = parts[index+1]
out.append((re.compile(pattern), replacement))
return out
def _get_path_formats(config):
"""Returns a list of path formats (query/template pairs); reflecting
the config's specified path formats.
"""
legacy_path_format = config_val(config, 'beets', 'path_format', None)
if legacy_path_format:
# Old path formats override the default values.
path_formats = [(library.PF_KEY_DEFAULT, legacy_path_format)]
else:
# If no legacy path format, use the defaults instead.
path_formats = DEFAULT_PATH_FORMATS
if config.has_section('paths'):
custom_path_formats = []
for key, value in config.items('paths', True):
if key in PF_KEY_QUERIES:
# Special values that indicate simple queries.
key = PF_KEY_QUERIES[key]
elif key != library.PF_KEY_DEFAULT:
# For non-special keys (literal queries), the _
# character denotes a :.
key = key.replace('_', ':')
custom_path_formats.append((key, value))
path_formats = custom_path_formats + path_formats
return path_formats
class Subcommand(object):
"""A subcommand of a root command-line application that may be
invoked by a SubcommandOptionParser.
"""
def __init__(self, name, parser=None, help='', aliases=()):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
OptionParser responsible for parsing the subcommand's options.
help is a short description of the command. If no parser is
given, it defaults to a new, empty OptionParser.
"""
self.name = name
self.parser = parser or optparse.OptionParser()
self.aliases = aliases
self.help = help
class SubcommandsOptionParser(optparse.OptionParser):
"""A variant of OptionParser that parses subcommands and their
arguments.
"""
# A singleton command used to give help on other subcommands.
_HelpSubcommand = Subcommand('help', optparse.OptionParser(),
help='give detailed help on a specific sub-command',
aliases=('?',))
def __init__(self, *args, **kwargs):
"""Create a new subcommand-aware option parser. All of the
options to OptionParser.__init__ are supported in addition
to subcommands, a sequence of Subcommand objects.
"""
# The subcommand array, with the help command included.
self.subcommands = list(kwargs.pop('subcommands', []))
self.subcommands.append(self._HelpSubcommand)
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
# Super constructor.
optparse.OptionParser.__init__(self, *args, **kwargs)
# Adjust the help-visible name of each subcommand.
for subcommand in self.subcommands:
subcommand.parser.prog = '%s %s' % \
(self.get_prog_name(), subcommand.name)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
def add_subcommand(self, cmd):
"""Adds a Subcommand object to the parser's list of commands.
"""
self.subcommands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = optparse.OptionParser.format_help(self, formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = ["\n"]
result.append(formatter.format_heading('Commands'))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
for subcommand in self.subcommands:
name = subcommand.name
if subcommand.aliases:
name += ' (%s)' % ', '.join(subcommand.aliases)
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for subcommand, name in zip(self.subcommands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = "%*s%s\n" % (formatter.current_indent, "", name)
indent_first = help_position
else:
name = "%*s%-*s " % (formatter.current_indent, "",
name_width, name)
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (help_position, "", line)
for line in help_lines[1:]])
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return out + "".join(result)
def _subcommand_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
given name. The name may either be the name of a subcommand or
an alias. If no subcommand matches, returns None.
"""
for subcommand in self.subcommands:
if name == subcommand.name or \
name in subcommand.aliases:
return subcommand
return None
def parse_args(self, a=None, v=None):
"""Like OptionParser.parse_args, but returns these four items:
- options: the options passed to the root parser
- subcommand: the Subcommand object that was invoked
- suboptions: the options passed to the subcommand parser
- subargs: the positional arguments passed to the subcommand
"""
options, args = optparse.OptionParser.parse_args(self, a, v)
if not args:
# No command given.
self.print_help()
self.exit()
else:
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
self.error('unknown command ' + cmdname)
suboptions, subargs = subcommand.parser.parse_args(args)
if subcommand is self._HelpSubcommand:
if subargs:
# particular
cmdname = subargs[0]
helpcommand = self._subcommand_for_name(cmdname)
helpcommand.parser.print_help()
self.exit()
else:
# general
self.print_help()
self.exit()
return options, subcommand, suboptions, subargs
def main(args=None, configfh=None):
"""Run the main command-line interface for beets."""
# Get the default subcommands.
from beets.ui.commands import default_commands
# Get default file paths.
default_config, default_libpath, default_dir = default_paths()
# Read defaults from config file.
config = ConfigParser.SafeConfigParser()
if configfh:
configpath = None
elif CONFIG_PATH_VAR in os.environ:
configpath = os.path.expanduser(os.environ[CONFIG_PATH_VAR])
else:
configpath = default_config
if configpath:
configpath = util.syspath(configpath)
        if os.path.exists(configpath):
configfh = open(configpath)
else:
configfh = None
if configfh:
config.readfp(configfh)
# Add plugin paths.
plugpaths = config_val(config, 'beets', 'pluginpath', '')
for plugpath in plugpaths.split(':'):
sys.path.append(os.path.expanduser(plugpath))
# Load requested plugins.
plugnames = config_val(config, 'beets', 'plugins', '')
plugins.load_plugins(plugnames.split())
plugins.load_listeners()
plugins.send("pluginload")
plugins.configure(config)
# Construct the root parser.
commands = list(default_commands)
commands += plugins.commands()
parser = SubcommandsOptionParser(subcommands=commands)
parser.add_option('-l', '--library', dest='libpath',
help='library database file to use')
parser.add_option('-d', '--directory', dest='directory',
help="destination music directory")
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='print debugging information')
# Parse the command-line!
options, subcommand, suboptions, subargs = parser.parse_args(args)
# Open library file.
libpath = options.libpath or \
config_val(config, 'beets', 'library', default_libpath)
directory = options.directory or \
config_val(config, 'beets', 'directory', default_dir)
path_formats = _get_path_formats(config)
art_filename = \
config_val(config, 'beets', 'art_filename', DEFAULT_ART_FILENAME)
lib_timeout = config_val(config, 'beets', 'timeout', DEFAULT_TIMEOUT)
replacements = _get_replacements(config)
try:
lib_timeout = float(lib_timeout)
except ValueError:
lib_timeout = DEFAULT_TIMEOUT
db_path = os.path.expanduser(libpath)
try:
lib = library.Library(db_path,
directory,
path_formats,
art_filename,
lib_timeout,
replacements)
except sqlite3.OperationalError:
raise UserError("database file %s could not be opened" % db_path)
# Configure the logger.
log = logging.getLogger('beets')
if options.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
log.debug(u'config file: %s' % configpath)
log.debug(u'library database: %s' % lib.path)
log.debug(u'library directory: %s' % lib.directory)
# Invoke the subcommand.
try:
subcommand.func(lib, config, suboptions, subargs)
except UserError, exc:
message = exc.args[0] if exc.args else None
subcommand.parser.error(message)
except IOError, exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
pass
else:
raise
|
from lib.compile import main
if __name__ == "__main__":
    main()
|
import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.nums = []
self.indexTable = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.indexTable:
return False
self.indexTable[val] = len(self.nums)
self.nums.append(val)
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val not in self.indexTable:
return False
index = self.indexTable[val]
last = self.nums[-1]
self.indexTable[last] = index
self.nums[index] = last
self.nums.pop()
del self.indexTable[val]
return True
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return random.choice(self.nums)
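# A minimal usage sketch (not part of the original solution): removal swaps
# the doomed value with the last list element before popping, which is what
# keeps insert/remove/getRandom all at average O(1).
if __name__ == "__main__":
    rs = RandomizedSet()
    assert rs.insert(1) is True   # new element
    assert rs.insert(1) is False  # duplicate rejected
    assert rs.insert(2) is True
    assert rs.remove(1) is True   # 2 is swapped into 1's slot
    assert rs.getRandom() == 2    # only 2 remains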
|
import uuid
from groundstation.proto.gizmo_pb2 import Gizmo
import groundstation.transfer.request
from groundstation.transfer import notification_handlers
from groundstation import logger
log = logger.getLogger(__name__)
class Notification(object):
VALID_NOTIFICATIONS = {
"NEWOBJECT": notification_handlers.handle_newobject
}
def __init__(self, verb, station=None, stream=None, payload=None, origin=None, remoteId=None):
self.type = "NOTIFICATION"
self.id = remoteId or uuid.uuid1()
self.verb = verb
self.station = station
self.stream = stream
self.payload = payload
# if origin:
# self.origin = uuid.UUID(origin)
self.origin = origin
# self.validate() # TODO
def _Request(self, *args, **kwargs):
kwargs['station'] = self.station
req = groundstation.transfer.request.Request(*args, **kwargs)
self.station.register_request(req)
return req
@classmethod
def from_gizmo(klass, gizmo, station, stream):
log.debug("Hydrating a notification from gizmo")
        return klass(gizmo.verb, station, stream, gizmo.payload, remoteId=gizmo.id)
def SerializeToString(self):
gizmo = self.station.gizmo_factory.gizmo()
gizmo.id = str(self.id)
gizmo.type = Gizmo.NOTIFICATION
gizmo.verb = self.verb
if self.payload:
gizmo.payload = self.payload
return gizmo.SerializeToString()
def process(self):
self.VALID_NOTIFICATIONS[self.verb](self)
# Boilerplate to appease protobuf
@property
def payload(self):
return self._payload
@payload.setter
def payload(self, value):
self._payload = str(value)
|
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
class OrderField(models.PositiveIntegerField):
def __init__(self, for_fields=None, *args, **kwargs):
self.for_fields = for_fields
super(OrderField, self).__init__(*args, **kwargs)
def pre_save(self, model_instance, add):
if getattr(model_instance, self.attname) is None:
try:
qs = self.model.objects.all()
if self.for_fields:
query = {field: getattr(model_instance, field) for field in self.for_fields}
qs = qs.filter(**query)
                last_item = qs.latest(self.attname)
                value = getattr(last_item, self.attname) + 1
except ObjectDoesNotExist:
value = 0
setattr(model_instance, self.attname, value)
return value
else:
return super(OrderField, self).pre_save(model_instance, add)
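# A hypothetical usage sketch (model and field names are illustrative, not
# from this module): OrderField fills in the next order value on save,
# counting separately for each value of the fields named in for_fields.
#
#   class Module(models.Model):
#       course = models.ForeignKey('courses.Course', related_name='modules')
#       title = models.CharField(max_length=200)
#       order = OrderField(blank=True, for_fields=['course'])  # restarts at 0 per course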
|
from os import environ
APP_VERSION = '0.0.1'
APP_NAME = environ.get("APP_NAME", "taofeng139")
if 'sae.kvdb.file' in environ:
debug = True
else:
debug = False
SITE_TITLE = u"涛锋培训"
SITE_TITLE2 = u"绿色便捷的打印新时代"
SITE_SUB_TITLE = u"CloudPrint.Me" # subtitle
KEYWORDS = u"CloudPrint,云,打印,山东财经大学,创业,绿色,环保,省时"
SITE_DECR = u"CloudPrint 是一个 ********"
COPY_YEAR = '2013'
LANGUAGE = 'zh-CN'
ADMIN_NAME = u"baitao.ji"
NOTICE_MAIL = u"dreambt@126.com"
MOVE_SECRET = 'what?' # migration password
THEME = 'taofeng139'
MAIL_FROM = 'postmaster@sdyspj.sendcloud.org'
MAIL_KEY = 'DSvK6ViH'
MAIL_SMTP = 'smtpcloud.sohu.com'
MAIL_PORT = 25
MAIL_TO = 'dev@cms4p.sendcloud.org'
ANALYTICS_CODE = """"""
ADSENSE_CODE1 = """"""
ADSENSE_CODE2 = """"""
EACH_PAGE_POST_NUM = 7 # posts shown per page
SHORTEN_CONTENT_WORDS = 512 # characters kept when excerpting a post in list views
DESCRIPTION_CUT_WORDS = 100 # characters shown in the meta description
RELATIVE_POST_NUM = 5 # number of related posts shown
EACH_PAGE_COMMENT_NUM = 10 # comments shown per page
RECENT_COMMENT_NUM = 5 # recent comments shown in the sidebar
RECENT_COMMENT_CUT_WORDS = 20 # characters shown per comment in the sidebar
MAX_COMMENT_NUM_A_DAY = 30 # per-day comment limit, enforced with a client-side cookie
COMMENT_DEFAULT_VISIBLE = 1 # 0/1: whether a new comment is shown immediately; 0 means it needs moderation first
PAGE_CACHE_TIME = 3600 * 24 # default page cache lifetime
LINK_NUM = 30 # number of blogroll links shown in the sidebar
HOT_TAGS_NUM = 30 # number of hot tags shown on the right
MAX_ARCHIVES_NUM = 50 # maximum number of archive entries shown
ADMIN_CATEGORY_NUM = 12
ADMIN_POST_NUM = 12
ADMIN_COMMENT_NUM = 12
ADMIN_USER_NUM = 12
ADMIN_LINK_NUM = 12
NUM_SHARDS = 0 # number of sharded counters; 0 is fine for small sites, and growing from 0 to e.g. 3 later may need minor code changes
if NUM_SHARDS > 0:
    SHARD_COUNT_SUPPORT = True # whether sharded counters are enabled
else:
SHARD_COUNT_SUPPORT = False
LINK_BROLL = [
{"text": '思奇博客', "url": 'http://www.im47.cn'},
]
XML_RPC_ENDPOINTS = [
'http://blogsearch.google.com/ping/RPC2',
'http://rpc.pingomatic.com/',
'http://ping.baidu.com/ping/RPC2'
]
COOKIE_SECRET = '7nVA0WeZSJSzTCUF8UZB/C3OfLrl7k26iHxfnVa9x0I='
if debug:
MAJOR_DOMAIN = '%s.sinaapp.com' % APP_NAME
BASE_URL = 'http://localhost:8081'
STATIC_URL = BASE_URL
JQUERY = "http://lib.sinaapp.com/js/jquery/2.0.2/jquery-2.0.2.min.js"
JQUERY_IE = "http://lib.sinaapp.com/js/jquery/1.10.1/jquery-1.10.1.min.js"
else:
BASE_URL = 'http://1.cloudprint.sinaapp.com'
STATIC_URL = BASE_URL
JQUERY = STATIC_URL + "/static/js/vender/jquery-2.0.2.min.js"
JQUERY_IE = STATIC_URL + "/static/js/vender/jquery-1.10.1.min.js"
if not debug:
REDIS_HOST = "localhost"
REDIS_PORT = 6379
DEFAULT_BUCKET = 'attachment'
if debug:
MYSQL_DB = 'app_cms4p'
MYSQL_USER = 'root'
MYSQL_PASS = 'hisense2002j'
MYSQL_HOST_M = '127.0.0.1'
MYSQL_HOST_S = '127.0.0.1'
MYSQL_PORT = '3306'
MAX_IDLE_TIME = 10
else:
MYSQL_DB = 'app_taofeng139'
MYSQL_USER = 'root'
MYSQL_PASS = 'hisense2012JBT'
MYSQL_HOST_M = 'localhost'
MYSQL_HOST_S = 'localhost'
MYSQL_PORT = '3306'
MAX_IDLE_TIME = 30
|
import codecs
import warnings
import re
from contextlib import contextmanager
from parso.normalizer import Normalizer, NormalizerConfig, Issue, Rule
from parso.python.tree import search_ancestor
_BLOCK_STMTS = ('if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt')
_STAR_EXPR_PARENTS = ('testlist_star_expr', 'testlist_comp', 'exprlist')
_MAX_BLOCK_SIZE = 20
_MAX_INDENT_COUNT = 100
ALLOWED_FUTURES = (
'all_feature_names', 'nested_scopes', 'generators', 'division',
'absolute_import', 'with_statement', 'print_function', 'unicode_literals',
)
_COMP_FOR_TYPES = ('comp_for', 'sync_comp_for')
def _iter_stmts(scope):
"""
Iterates over all statements and splits up simple_stmt.
"""
for child in scope.children:
if child.type == 'simple_stmt':
for child2 in child.children:
if child2.type == 'newline' or child2 == ';':
continue
yield child2
else:
yield child
def _get_comprehension_type(atom):
first, second = atom.children[:2]
if second.type == 'testlist_comp' and second.children[1].type in _COMP_FOR_TYPES:
if first == '[':
return 'list comprehension'
else:
return 'generator expression'
elif second.type == 'dictorsetmaker' and second.children[-1].type in _COMP_FOR_TYPES:
if second.children[1] == ':':
return 'dict comprehension'
else:
return 'set comprehension'
return None
def _is_future_import(import_from):
# It looks like a __future__ import that is relative is still a future
# import. That feels kind of odd, but whatever.
# if import_from.level != 0:
# return False
from_names = import_from.get_from_names()
return [n.value for n in from_names] == ['__future__']
def _remove_parens(atom):
"""
Returns the inner part of an expression like `(foo)`. Also removes nested
parens.
"""
try:
children = atom.children
except AttributeError:
pass
else:
if len(children) == 3 and children[0] == '(':
return _remove_parens(atom.children[1])
return atom
def _iter_params(parent_node):
return (n for n in parent_node.children if n.type == 'param')
def _is_future_import_first(import_from):
"""
Checks if the import is the first statement of a file.
"""
found_docstring = False
for stmt in _iter_stmts(import_from.get_root_node()):
if stmt.type == 'string' and not found_docstring:
continue
found_docstring = True
if stmt == import_from:
return True
if stmt.type == 'import_from' and _is_future_import(stmt):
continue
return False
def _iter_definition_exprs_from_lists(exprlist):
for child in exprlist.children[::2]:
if child.type == 'atom' and child.children[0] in ('(', '['):
            testlist_comp = child.children[1]
if testlist_comp.type == 'testlist_comp':
for expr in _iter_definition_exprs_from_lists(testlist_comp):
yield expr
continue
elif child.children[0] == '[':
yield testlist_comp
continue
yield child
def _get_expr_stmt_definition_exprs(expr_stmt):
exprs = []
for list_ in expr_stmt.children[:-2:2]:
if list_.type in ('testlist_star_expr', 'testlist'):
exprs += _iter_definition_exprs_from_lists(list_)
else:
exprs.append(list_)
return exprs
def _get_for_stmt_definition_exprs(for_stmt):
exprlist = for_stmt.children[1]
if exprlist.type != 'exprlist':
return [exprlist]
return list(_iter_definition_exprs_from_lists(exprlist))
class _Context(object):
def __init__(self, node, add_syntax_error, parent_context=None):
self.node = node
self.blocks = []
self.parent_context = parent_context
self._used_name_dict = {}
self._global_names = []
self._nonlocal_names = []
self._nonlocal_names_in_subscopes = []
self._add_syntax_error = add_syntax_error
def is_async_funcdef(self):
# Stupidly enough async funcdefs can have two different forms,
# depending if a decorator is used or not.
return self.is_function() \
and self.node.parent.type in ('async_funcdef', 'async_stmt')
def is_function(self):
return self.node.type == 'funcdef'
def add_name(self, name):
parent_type = name.parent.type
if parent_type == 'trailer':
# We are only interested in first level names.
return
if parent_type == 'global_stmt':
self._global_names.append(name)
elif parent_type == 'nonlocal_stmt':
self._nonlocal_names.append(name)
else:
self._used_name_dict.setdefault(name.value, []).append(name)
def finalize(self):
"""
Returns a list of nonlocal names that need to be part of that scope.
"""
self._analyze_names(self._global_names, 'global')
self._analyze_names(self._nonlocal_names, 'nonlocal')
# Python2.6 doesn't have dict comprehensions.
global_name_strs = dict((n.value, n) for n in self._global_names)
for nonlocal_name in self._nonlocal_names:
try:
global_name = global_name_strs[nonlocal_name.value]
except KeyError:
continue
message = "name '%s' is nonlocal and global" % global_name.value
if global_name.start_pos < nonlocal_name.start_pos:
error_name = global_name
else:
error_name = nonlocal_name
self._add_syntax_error(error_name, message)
nonlocals_not_handled = []
for nonlocal_name in self._nonlocal_names_in_subscopes:
search = nonlocal_name.value
if search in global_name_strs or self.parent_context is None:
message = "no binding for nonlocal '%s' found" % nonlocal_name.value
self._add_syntax_error(nonlocal_name, message)
elif not self.is_function() or \
nonlocal_name.value not in self._used_name_dict:
nonlocals_not_handled.append(nonlocal_name)
return self._nonlocal_names + nonlocals_not_handled
def _analyze_names(self, globals_or_nonlocals, type_):
def raise_(message):
self._add_syntax_error(base_name, message % (base_name.value, type_))
params = []
if self.node.type == 'funcdef':
params = self.node.get_params()
for base_name in globals_or_nonlocals:
found_global_or_nonlocal = False
# Somehow Python does it the reversed way.
for name in reversed(self._used_name_dict.get(base_name.value, [])):
if name.start_pos > base_name.start_pos:
# All following names don't have to be checked.
found_global_or_nonlocal = True
parent = name.parent
if parent.type == 'param' and parent.name == name:
# Skip those here, these definitions belong to the next
# scope.
continue
if name.is_definition():
if parent.type == 'expr_stmt' \
and parent.children[1].type == 'annassign':
if found_global_or_nonlocal:
# If it's after the global the error seems to be
# placed there.
base_name = name
raise_("annotated name '%s' can't be %s")
break
else:
message = "name '%s' is assigned to before %s declaration"
else:
message = "name '%s' is used prior to %s declaration"
if not found_global_or_nonlocal:
raise_(message)
# Only add an error for the first occurence.
break
for param in params:
if param.name.value == base_name.value:
raise_("name '%s' is parameter and %s"),
@contextmanager
def add_block(self, node):
self.blocks.append(node)
yield
self.blocks.pop()
def add_context(self, node):
return _Context(node, self._add_syntax_error, parent_context=self)
def close_child_context(self, child_context):
self._nonlocal_names_in_subscopes += child_context.finalize()
class ErrorFinder(Normalizer):
"""
Searches for errors in the syntax tree.
"""
def __init__(self, *args, **kwargs):
super(ErrorFinder, self).__init__(*args, **kwargs)
self._error_dict = {}
self.version = self.grammar.version_info
def initialize(self, node):
def create_context(node):
if node is None:
return None
parent_context = create_context(node.parent)
if node.type in ('classdef', 'funcdef', 'file_input'):
return _Context(node, self._add_syntax_error, parent_context)
return parent_context
self.context = create_context(node) or _Context(node, self._add_syntax_error)
self._indentation_count = 0
def visit(self, node):
if node.type == 'error_node':
with self.visit_node(node):
# Don't need to investigate the inners of an error node. We
# might find errors in there that should be ignored, because
# the error node itself already shows that there's an issue.
return ''
return super(ErrorFinder, self).visit(node)
@contextmanager
def visit_node(self, node):
self._check_type_rules(node)
if node.type in _BLOCK_STMTS:
with self.context.add_block(node):
if len(self.context.blocks) == _MAX_BLOCK_SIZE:
self._add_syntax_error(node, "too many statically nested blocks")
yield
return
elif node.type == 'suite':
self._indentation_count += 1
if self._indentation_count == _MAX_INDENT_COUNT:
self._add_indentation_error(node.children[1], "too many levels of indentation")
yield
if node.type == 'suite':
self._indentation_count -= 1
elif node.type in ('classdef', 'funcdef'):
context = self.context
self.context = context.parent_context
self.context.close_child_context(context)
def visit_leaf(self, leaf):
if leaf.type == 'error_leaf':
if leaf.token_type in ('INDENT', 'ERROR_DEDENT'):
# Indents/Dedents itself never have a prefix. They are just
# "pseudo" tokens that get removed by the syntax tree later.
# Therefore in case of an error we also have to check for this.
spacing = list(leaf.get_next_leaf()._split_prefix())[-1]
if leaf.token_type == 'INDENT':
message = 'unexpected indent'
else:
message = 'unindent does not match any outer indentation level'
self._add_indentation_error(spacing, message)
else:
if leaf.value.startswith('\\'):
message = 'unexpected character after line continuation character'
else:
match = re.match('\\w{,2}("{1,3}|\'{1,3})', leaf.value)
if match is None:
message = 'invalid syntax'
else:
if len(match.group(1)) == 1:
message = 'EOL while scanning string literal'
else:
message = 'EOF while scanning triple-quoted string literal'
self._add_syntax_error(leaf, message)
return ''
elif leaf.value == ':':
parent = leaf.parent
if parent.type in ('classdef', 'funcdef'):
self.context = self.context.add_context(parent)
# The rest is rule based.
return super(ErrorFinder, self).visit_leaf(leaf)
def _add_indentation_error(self, spacing, message):
self.add_issue(spacing, 903, "IndentationError: " + message)
def _add_syntax_error(self, node, message):
self.add_issue(node, 901, "SyntaxError: " + message)
def add_issue(self, node, code, message):
# Overwrite the default behavior.
# Check if the issues are on the same line.
line = node.start_pos[0]
args = (code, message, node)
self._error_dict.setdefault(line, args)
def finalize(self):
self.context.finalize()
for code, message, node in self._error_dict.values():
self.issues.append(Issue(node, code, message))
class IndentationRule(Rule):
code = 903
def _get_message(self, message):
message = super(IndentationRule, self)._get_message(message)
return "IndentationError: " + message
@ErrorFinder.register_rule(type='error_node')
class _ExpectIndentedBlock(IndentationRule):
message = 'expected an indented block'
def get_node(self, node):
leaf = node.get_next_leaf()
return list(leaf._split_prefix())[-1]
def is_issue(self, node):
# This is the beginning of a suite that is not indented.
return node.children[-1].type == 'newline'
class ErrorFinderConfig(NormalizerConfig):
normalizer_class = ErrorFinder
class SyntaxRule(Rule):
code = 901
def _get_message(self, message):
message = super(SyntaxRule, self)._get_message(message)
return "SyntaxError: " + message
@ErrorFinder.register_rule(type='error_node')
class _InvalidSyntaxRule(SyntaxRule):
message = "invalid syntax"
def get_node(self, node):
return node.get_next_leaf()
def is_issue(self, node):
# Error leafs will be added later as an error.
return node.get_next_leaf().type != 'error_leaf'
@ErrorFinder.register_rule(value='await')
class _AwaitOutsideAsync(SyntaxRule):
message = "'await' outside async function"
def is_issue(self, leaf):
return not self._normalizer.context.is_async_funcdef()
def get_error_node(self, node):
# Return the whole await statement.
return node.parent
@ErrorFinder.register_rule(value='break')
class _BreakOutsideLoop(SyntaxRule):
message = "'break' outside loop"
def is_issue(self, leaf):
in_loop = False
for block in self._normalizer.context.blocks:
if block.type in ('for_stmt', 'while_stmt'):
in_loop = True
return not in_loop
@ErrorFinder.register_rule(value='continue')
class _ContinueChecks(SyntaxRule):
message = "'continue' not properly in loop"
message_in_finally = "'continue' not supported inside 'finally' clause"
def is_issue(self, leaf):
in_loop = False
for block in self._normalizer.context.blocks:
if block.type in ('for_stmt', 'while_stmt'):
in_loop = True
if block.type == 'try_stmt':
last_block = block.children[-3]
if last_block == 'finally' and leaf.start_pos > last_block.start_pos:
self.add_issue(leaf, message=self.message_in_finally)
return False # Error already added
if not in_loop:
return True
@ErrorFinder.register_rule(value='from')
class _YieldFromCheck(SyntaxRule):
message = "'yield from' inside async function"
def get_node(self, leaf):
return leaf.parent.parent # This is the actual yield statement.
def is_issue(self, leaf):
return leaf.parent.type == 'yield_arg' \
and self._normalizer.context.is_async_funcdef()
@ErrorFinder.register_rule(type='name')
class _NameChecks(SyntaxRule):
message = 'cannot assign to __debug__'
message_none = 'cannot assign to None'
def is_issue(self, leaf):
self._normalizer.context.add_name(leaf)
if leaf.value == '__debug__' and leaf.is_definition():
return True
if leaf.value == 'None' and self._normalizer.version < (3, 0) \
and leaf.is_definition():
self.add_issue(leaf, message=self.message_none)
@ErrorFinder.register_rule(type='string')
class _StringChecks(SyntaxRule):
message = "bytes can only contain ASCII literal characters."
def is_issue(self, leaf):
string_prefix = leaf.string_prefix.lower()
if 'b' in string_prefix \
and self._normalizer.version >= (3, 0) \
and any(c for c in leaf.value if ord(c) > 127):
# b'ä'
return True
if 'r' not in string_prefix:
# Raw strings don't need to be checked if they have proper
# escaping.
is_bytes = self._normalizer.version < (3, 0)
if 'b' in string_prefix:
is_bytes = True
if 'u' in string_prefix:
is_bytes = False
payload = leaf._get_payload()
if is_bytes:
payload = payload.encode('utf-8')
func = codecs.escape_decode
else:
func = codecs.unicode_escape_decode
try:
with warnings.catch_warnings():
# The warnings from parsing strings are not relevant.
warnings.filterwarnings('ignore')
func(payload)
except UnicodeDecodeError as e:
self.add_issue(leaf, message='(unicode error) ' + str(e))
except ValueError as e:
self.add_issue(leaf, message='(value error) ' + str(e))
@ErrorFinder.register_rule(value='*')
class _StarCheck(SyntaxRule):
message = "named arguments must follow bare *"
def is_issue(self, leaf):
params = leaf.parent
if params.type == 'parameters' and params:
after = params.children[params.children.index(leaf) + 1:]
after = [child for child in after
if child not in (',', ')') and not child.star_count]
return len(after) == 0
@ErrorFinder.register_rule(value='**')
class _StarStarCheck(SyntaxRule):
# e.g. {**{} for a in [1]}
# TODO this should probably get a better end_pos including
# the next sibling of leaf.
message = "dict unpacking cannot be used in dict comprehension"
def is_issue(self, leaf):
if leaf.parent.type == 'dictorsetmaker':
comp_for = leaf.get_next_sibling().get_next_sibling()
return comp_for is not None and comp_for.type in _COMP_FOR_TYPES
@ErrorFinder.register_rule(value='yield')
@ErrorFinder.register_rule(value='return')
class _ReturnAndYieldChecks(SyntaxRule):
message = "'return' with value in async generator"
message_async_yield = "'yield' inside async function"
def get_node(self, leaf):
return leaf.parent
def is_issue(self, leaf):
if self._normalizer.context.node.type != 'funcdef':
self.add_issue(self.get_node(leaf), message="'%s' outside function" % leaf.value)
elif self._normalizer.context.is_async_funcdef() \
and any(self._normalizer.context.node.iter_yield_exprs()):
if leaf.value == 'return' and leaf.parent.type == 'return_stmt':
return True
elif leaf.value == 'yield' \
and leaf.get_next_leaf() != 'from' \
and self._normalizer.version == (3, 5):
self.add_issue(self.get_node(leaf), message=self.message_async_yield)
@ErrorFinder.register_rule(type='strings')
class _BytesAndStringMix(SyntaxRule):
# e.g. 's' b''
message = "cannot mix bytes and nonbytes literals"
def _is_bytes_literal(self, string):
if string.type == 'fstring':
return False
return 'b' in string.string_prefix.lower()
def is_issue(self, node):
first = node.children[0]
# In Python 2 it's allowed to mix bytes and unicode.
if self._normalizer.version >= (3, 0):
first_is_bytes = self._is_bytes_literal(first)
for string in node.children[1:]:
if first_is_bytes != self._is_bytes_literal(string):
return True
@ErrorFinder.register_rule(type='import_as_names')
class _TrailingImportComma(SyntaxRule):
# e.g. from foo import a,
message = "trailing comma not allowed without surrounding parentheses"
def is_issue(self, node):
if node.children[-1] == ',':
return True
@ErrorFinder.register_rule(type='import_from')
class _ImportStarInFunction(SyntaxRule):
message = "import * only allowed at module level"
def is_issue(self, node):
return node.is_star_import() and self._normalizer.context.parent_context is not None
@ErrorFinder.register_rule(type='import_from')
class _FutureImportRule(SyntaxRule):
message = "from __future__ imports must occur at the beginning of the file"
def is_issue(self, node):
if _is_future_import(node):
if not _is_future_import_first(node):
return True
for from_name, future_name in node.get_paths():
name = future_name.value
allowed_futures = list(ALLOWED_FUTURES)
if self._normalizer.version >= (3, 5):
allowed_futures.append('generator_stop')
if name == 'braces':
self.add_issue(node, message="not a chance")
elif name == 'barry_as_FLUFL':
m = "Seriously I'm not implementing this :) ~ Dave"
self.add_issue(node, message=m)
            elif name not in allowed_futures:
message = "future feature %s is not defined" % name
self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='star_expr')
class _StarExprRule(SyntaxRule):
message = "starred assignment target must be in a list or tuple"
message_iterable_unpacking = "iterable unpacking cannot be used in comprehension"
message_assignment = "can use starred expression only as assignment target"
def is_issue(self, node):
if node.parent.type not in _STAR_EXPR_PARENTS:
return True
if node.parent.type == 'testlist_comp':
# [*[] for a in [1]]
if node.parent.children[1].type in _COMP_FOR_TYPES:
self.add_issue(node, message=self.message_iterable_unpacking)
if self._normalizer.version <= (3, 4):
n = search_ancestor(node, 'for_stmt', 'expr_stmt')
found_definition = False
if n is not None:
if n.type == 'expr_stmt':
exprs = _get_expr_stmt_definition_exprs(n)
else:
exprs = _get_for_stmt_definition_exprs(n)
if node in exprs:
found_definition = True
if not found_definition:
self.add_issue(node, message=self.message_assignment)
@ErrorFinder.register_rule(types=_STAR_EXPR_PARENTS)
class _StarExprParentRule(SyntaxRule):
def is_issue(self, node):
if node.parent.type == 'del_stmt':
self.add_issue(node.parent, message="can't use starred expression here")
else:
def is_definition(node, ancestor):
if ancestor is None:
return False
type_ = ancestor.type
if type_ == 'trailer':
return False
if type_ == 'expr_stmt':
return node.start_pos < ancestor.children[-1].start_pos
return is_definition(node, ancestor.parent)
if is_definition(node, node.parent):
args = [c for c in node.children if c != ',']
starred = [c for c in args if c.type == 'star_expr']
if len(starred) > 1:
message = "two starred expressions in assignment"
self.add_issue(starred[1], message=message)
elif starred:
count = args.index(starred[0])
if count >= 256:
message = "too many expressions in star-unpacking assignment"
self.add_issue(starred[0], message=message)
@ErrorFinder.register_rule(type='annassign')
class _AnnotatorRule(SyntaxRule):
# True: int
# {}: float
message = "illegal target for annotation"
def get_node(self, node):
return node.parent
def is_issue(self, node):
type_ = None
lhs = node.parent.children[0]
lhs = _remove_parens(lhs)
try:
children = lhs.children
except AttributeError:
pass
else:
if ',' in children or lhs.type == 'atom' and children[0] == '(':
type_ = 'tuple'
elif lhs.type == 'atom' and children[0] == '[':
type_ = 'list'
trailer = children[-1]
if type_ is None:
if not (lhs.type == 'name'
# subscript/attributes are allowed
or lhs.type in ('atom_expr', 'power')
and trailer.type == 'trailer'
and trailer.children[0] != '('):
return True
else:
# x, y: str
message = "only single target (not %s) can be annotated"
self.add_issue(lhs.parent, message=message % type_)
@ErrorFinder.register_rule(type='argument')
class _ArgumentRule(SyntaxRule):
def is_issue(self, node):
first = node.children[0]
if node.children[1] == '=' and first.type != 'name':
if first.type == 'lambdef':
# f(lambda: 1=1)
if self._normalizer.version < (3, 8):
message = "lambda cannot contain assignment"
else:
message = 'expression cannot contain assignment, perhaps you meant "=="?'
else:
# f(+x=1)
if self._normalizer.version < (3, 8):
message = "keyword can't be an expression"
else:
message = 'expression cannot contain assignment, perhaps you meant "=="?'
self.add_issue(first, message=message)
@ErrorFinder.register_rule(type='nonlocal_stmt')
class _NonlocalModuleLevelRule(SyntaxRule):
message = "nonlocal declaration not allowed at module level"
def is_issue(self, node):
return self._normalizer.context.parent_context is None
@ErrorFinder.register_rule(type='arglist')
class _ArglistRule(SyntaxRule):
@property
def message(self):
if self._normalizer.version < (3, 7):
return "Generator expression must be parenthesized if not sole argument"
else:
return "Generator expression must be parenthesized"
def is_issue(self, node):
first_arg = node.children[0]
if first_arg.type == 'argument' \
and first_arg.children[1].type in _COMP_FOR_TYPES:
# e.g. foo(x for x in [], b)
return len(node.children) >= 2
else:
arg_set = set()
kw_only = False
kw_unpacking_only = False
is_old_starred = False
# In python 3 this would be a bit easier (stars are part of
# argument), but we have to understand both.
for argument in node.children:
if argument == ',':
continue
if argument in ('*', '**'):
# Python < 3.5 has the order engraved in the grammar
# file. No need to do anything here.
is_old_starred = True
continue
if is_old_starred:
is_old_starred = False
continue
if argument.type == 'argument':
first = argument.children[0]
if first in ('*', '**'):
if first == '*':
if kw_unpacking_only:
# foo(**kwargs, *args)
message = "iterable argument unpacking " \
"follows keyword argument unpacking"
self.add_issue(argument, message=message)
else:
kw_unpacking_only = True
else: # Is a keyword argument.
kw_only = True
if first.type == 'name':
if first.value in arg_set:
# f(x=1, x=2)
self.add_issue(first, message="keyword argument repeated")
else:
arg_set.add(first.value)
else:
if kw_unpacking_only:
# f(**x, y)
message = "positional argument follows keyword argument unpacking"
self.add_issue(argument, message=message)
elif kw_only:
# f(x=2, y)
message = "positional argument follows keyword argument"
self.add_issue(argument, message=message)
@ErrorFinder.register_rule(type='parameters')
@ErrorFinder.register_rule(type='lambdef')
class _ParameterRule(SyntaxRule):
# def f(x=3, y): pass
message = "non-default argument follows default argument"
def is_issue(self, node):
param_names = set()
default_only = False
for p in _iter_params(node):
if p.name.value in param_names:
message = "duplicate argument '%s' in function definition"
self.add_issue(p.name, message=message % p.name.value)
param_names.add(p.name.value)
if p.default is None and not p.star_count:
if default_only:
return True
else:
default_only = True
@ErrorFinder.register_rule(type='try_stmt')
class _TryStmtRule(SyntaxRule):
message = "default 'except:' must be last"
def is_issue(self, try_stmt):
default_except = None
for except_clause in try_stmt.children[3::3]:
if except_clause in ('else', 'finally'):
break
if except_clause == 'except':
default_except = except_clause
elif default_except is not None:
self.add_issue(default_except, message=self.message)
@ErrorFinder.register_rule(type='fstring')
class _FStringRule(SyntaxRule):
_fstring_grammar = None
message_nested = "f-string: expressions nested too deeply"
message_conversion = "f-string: invalid conversion character: expected 's', 'r', or 'a'"
def _check_format_spec(self, format_spec, depth):
self._check_fstring_contents(format_spec.children[1:], depth)
def _check_fstring_expr(self, fstring_expr, depth):
if depth >= 2:
self.add_issue(fstring_expr, message=self.message_nested)
conversion = fstring_expr.children[2]
if conversion.type == 'fstring_conversion':
name = conversion.children[1]
if name.value not in ('s', 'r', 'a'):
self.add_issue(name, message=self.message_conversion)
format_spec = fstring_expr.children[-2]
if format_spec.type == 'fstring_format_spec':
self._check_format_spec(format_spec, depth + 1)
def is_issue(self, fstring):
self._check_fstring_contents(fstring.children[1:-1])
def _check_fstring_contents(self, children, depth=0):
for fstring_content in children:
if fstring_content.type == 'fstring_expr':
self._check_fstring_expr(fstring_content, depth)
class _CheckAssignmentRule(SyntaxRule):
def _check_assignment(self, node, is_deletion=False):
error = None
type_ = node.type
if type_ == 'lambdef':
error = 'lambda'
elif type_ == 'atom':
first, second = node.children[:2]
error = _get_comprehension_type(node)
if error is None:
if second.type == 'dictorsetmaker':
if self._normalizer.version < (3, 8):
error = 'literal'
else:
if second.children[1] == ':':
error = 'dict display'
else:
error = 'set display'
elif first in ('(', '['):
if second.type == 'yield_expr':
error = 'yield expression'
elif second.type == 'testlist_comp':
# This is not a comprehension, they were handled
# further above.
for child in second.children[::2]:
self._check_assignment(child, is_deletion)
else: # Everything handled, must be useless brackets.
self._check_assignment(second, is_deletion)
elif type_ == 'keyword':
if self._normalizer.version < (3, 8):
error = 'keyword'
else:
error = str(node.value)
elif type_ == 'operator':
if node.value == '...':
error = 'Ellipsis'
elif type_ == 'comparison':
error = 'comparison'
elif type_ in ('string', 'number', 'strings'):
error = 'literal'
elif type_ == 'yield_expr':
# This one seems to be a slightly different warning in Python.
message = 'assignment to yield expression not possible'
self.add_issue(node, message=message)
elif type_ == 'test':
error = 'conditional expression'
elif type_ in ('atom_expr', 'power'):
if node.children[0] == 'await':
error = 'await expression'
elif node.children[-2] == '**':
error = 'operator'
else:
# Has a trailer
trailer = node.children[-1]
assert trailer.type == 'trailer'
if trailer.children[0] == '(':
error = 'function call'
elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'):
for child in node.children[::2]:
self._check_assignment(child, is_deletion)
elif ('expr' in type_ and type_ != 'star_expr' # is a substring
or '_test' in type_
or type_ in ('term', 'factor')):
error = 'operator'
if error is not None:
cannot = "can't" if self._normalizer.version < (3, 8) else "cannot"
message = ' '.join([cannot, "delete" if is_deletion else "assign to", error])
self.add_issue(node, message=message)
@ErrorFinder.register_rule(type='comp_for')
@ErrorFinder.register_rule(type='sync_comp_for')
class _CompForRule(_CheckAssignmentRule):
message = "asynchronous comprehension outside of an asynchronous function"
def is_issue(self, node):
# Some of the nodes here are already used, so no else if
if node.type != 'comp_for' or self._normalizer.version < (3, 8):
# comp_for was replaced by sync_comp_for in Python 3.8.
expr_list = node.children[1 + int(node.children[0] == 'async')]
if expr_list.type != 'expr_list': # Already handled.
self._check_assignment(expr_list)
return node.children[0] == 'async' \
and not self._normalizer.context.is_async_funcdef()
@ErrorFinder.register_rule(type='expr_stmt')
class _ExprStmtRule(_CheckAssignmentRule):
message = "illegal expression for augmented assignment"
def is_issue(self, node):
for before_equal in node.children[:-2:2]:
self._check_assignment(before_equal)
augassign = node.children[1]
if augassign != '=' and augassign.type != 'annassign': # Is augassign.
return node.children[0].type in ('testlist_star_expr', 'atom', 'testlist')
@ErrorFinder.register_rule(type='with_item')
class _WithItemRule(_CheckAssignmentRule):
def is_issue(self, with_item):
self._check_assignment(with_item.children[2])
@ErrorFinder.register_rule(type='del_stmt')
class _DelStmtRule(_CheckAssignmentRule):
def is_issue(self, del_stmt):
child = del_stmt.children[1]
if child.type != 'expr_list': # Already handled.
self._check_assignment(child, is_deletion=True)
@ErrorFinder.register_rule(type='expr_list')
class _ExprListRule(_CheckAssignmentRule):
def is_issue(self, expr_list):
for expr in expr_list.children[::2]:
self._check_assignment(expr)
@ErrorFinder.register_rule(type='for_stmt')
class _ForStmtRule(_CheckAssignmentRule):
def is_issue(self, for_stmt):
# Some of the nodes here are already used, so no else if
expr_list = for_stmt.children[1]
if expr_list.type != 'expr_list': # Already handled.
self._check_assignment(expr_list)
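# A hedged usage sketch (not part of this module): parso's public grammar
# API is the usual entry point for these rules; load_grammar() and
# Grammar.iter_errors() are assumed to exist as in recent parso releases.
if __name__ == '__main__':
    import parso
    grammar = parso.load_grammar()
    module = grammar.parse('continue\n')
    for issue in grammar.iter_errors(module):
        # A module-level 'continue' trips _ContinueChecks above (code 901).
        print('%s %s' % (issue.code, issue.message))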
|
N = int(input())
points = [tuple(int(x) for x in input().split()) for _ in range(N)]
# Each unit move flips the parity of x + y, so every target must share the
# parity of the first target; otherwise no common move sequence exists.
if any(abs(x + y) % 2 != abs(sum(points[0])) % 2 for x, y in points):
    print(-1)
    quit()
if any(not (-10 <= x <= 10 and -10 <= y <= 10) for x, y in points):
    assert False
# M unit moves: 20 covers any |x| + |y| <= 20, plus 1 when the targets have
# odd parity, so the leftover moves always pair off evenly.
M = 20 + (abs(sum(points[0])) % 2)
print(M)
print(*[1] * M)
for x, y in points:
    # Walk straight to (x, y), then spend the remaining moves in
    # cancelling down/up pairs.
    print("LR"[x > 0] * abs(x) + "DU"[y > 0] * abs(y) + "DU" * ((M - abs(x) - abs(y)) // 2))
|
"""
Demonstration of modeling an accelerator with the lte file.
SXFEL
Author : Tong Zhang
Created : 2016-04-12 10:11:06 AM CST
Last updated : 2016-04-12 21:20:16 PM CST
"""
import beamline
import os
import matplotlib.pyplot as plt
ltefile = os.path.join(os.getcwd(), 'sxfel/sxfel_v14b.lte')
lpins = beamline.LteParser(ltefile)
blname = 'bl'
newltefile = os.path.join(os.getcwd(), 'sxfel/sxfel.lte')
latins = beamline.Lattice(lpins.file2json())
latins.generateLatticeFile(blname, newltefile)
newlpins = beamline.LteParser(newltefile)
newlatins = beamline.Lattice(newlpins.file2json())
kw_name = 'Q01L0'
kw_eobj = newlpins.makeElement(kw_name)
kw_eobj.printConfig(type='all')
print newlpins.ctrlconf_dict[kw_name]
latmodel = beamline.Models(name=blname, mode='simu')
ele_name_list = newlatins.getElementList(blname)
ele_eobj_list = []
for ele in ele_name_list:
eobj = newlatins.makeElement(ele)
ele_eobj_list.append(eobj)
latmodel.addElement(*ele_eobj_list)
Q_list = latmodel.getElementsByName(kw_name.lower())
Q_list[0].printConfig(type='all')
B1LH_list = latmodel.getElementsByName('B1LH'.lower())
B1LH_list[0].printConfig(type='all')
finlatins = beamline.Lattice(latmodel.getAllConfig())
finltefile = os.path.join(os.getcwd(), 'sxfel/om.lte')
finlatins.generateLatticeFile(latmodel.name, finltefile)
simpath = os.path.join(os.getcwd(), 'sxfel')
elefile = os.path.join(simpath, 'om.ele')
elesim = beamline.Simulator()
elesim.setMode('elegant')
elesim.setScript('runElegant.sh')
elesim.setExec('/home/tong/APS/oag/apps/bin/linux-x86_64/elegant')
elesim.setPath(simpath)
elesim.setInputfiles(ltefile=finltefile, elefile=elefile)
elesim.doSimulation()
data_tp = elesim.getOutput(file = 'om.out', data = ('t', 'p' ))#, dump = h5out)
data_sSx = elesim.getOutput(file = 'om.sig', data = ('s', 'Sx' ))
data_setax = elesim.getOutput(file = 'om.twi', data = ('s', 'etax'))
"""
import numpy as np
dx = []
thetaArray = np.linspace(0.05,0.3,20)
for theta in thetaArray:
eleb1.setConf({'angle':theta}, type = 'simu')
latins = beamline.Lattice(latline_online.getAllConfig())
latins.generateLatticeFile(latline_online.name, latfile)
elesim.doSimulation()
data = elesim.getOutput(file = 'test.twi', data = (['etax']))
dx.append(data[-1])
dxArray = np.array(dx)
plt.plot(thetaArray, dxArray, 'r')
"""
ptches, anotes, xr, yr = latmodel.draw(mode='plain', showfig=False)
fig3 = plt.figure(2)
ax3 = fig3.add_subplot(111)
ax3.plot(data_setax[:,0],data_setax[:,1],'r-', lw=3,)
ax3.set_ylabel('$\eta_{x}\,[m]$')
ax3.set_xlabel('$s\,[m]$')
ax3t = ax3.twinx()
[ax3t.add_patch(i) for i in ptches]
xr3 = ax3.get_xlim()
yr3 = ax3.get_ylim()
x0, x1 = min(xr[0],xr3[0]), max(xr[1], xr3[1])
y0, y1 = min(yr[0],yr3[0]), max(yr[1], yr3[1])
ax3t.set_xlim(x0, x1)
ax3t.set_ylim(y0, y1*3)
ax3.set_xlim(x0, x1)
ax3.set_ylim(y0, y1)
ax3.grid()
newptches = beamline.MagBlock.copy_patches(ptches)
fig4 = plt.figure(4, figsize=(30,8), dpi=90)
ax4 = fig4.add_subplot(111, aspect=4)
beamline.Models.plotElements(ax4, newptches)
beamline.Models.anoteElements(ax4, anotes, efilter='CSRCSBEN',
textypos=1.5, color='b', rotation=50, fontsize='x-small')
beamline.Models.anoteElements(ax4, anotes, efilter=('RFCW','RFDF'),
textypos=None, arrowprops=None, color='k',
rotation=0, fontsize='small', fontweight='bold')
ax4.set_yticks([])
ax4.set_xlim(-1,125)
ax4.set_ylim(y0, y1*3)
ax4.set_xlabel('$s\,\mathrm{[m]}$', fontsize=20)
fig4.tight_layout()
ax4.set_title('SXFEL Lattice Layout', fontsize=24, color='m', fontweight='bold')
plt.show()
|
import matplotlib.pyplot as plt
import numpy as np
import pretty_print as pp
from collections import defaultdict
import zipfile
import percolations as perc
from time import clock
colorvec = ['black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
align_prop = 0.05
numsims = 1000 # number of simulations
size_epi = 515 # threshold value that designates an epidemic in the network (5% of network)
gamma = 0.2
T1, T2 = 0.0, 0.2
b1, b2 = (-T1 * gamma)/(T1 - 1), (-T2 * gamma)/(T2 - 1) # 0, .05
blist = np.linspace(b1, b2, num=11, endpoint=True) # probability of transmission
d_node_age = {}
zipname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Results/beta_time_%ssims_beta%.3f-%.3f_vax0.zip' %(numsims, b1, b2)
graph_ages = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Age_Based_Simulations/Data/urban_ages_Sarah.csv') # node number and age class
for line in graph_ages:
    for entry in line.split():
        node, age = entry.split(',')
        d_node_age[node] = age # node-ageclass dictionary
N = len(d_node_age)
ch = [1 if d_node_age[str(node)] == '3' else 0 for node in xrange(1, int(N) + 1)]
ad = [1 if d_node_age[str(node)] == '4' else 0 for node in xrange(1, int(N) + 1)]
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = defaultdict(list), defaultdict(list), {}, defaultdict(list), defaultdict(list)
for beta in blist:
processing = clock()
# reference filenames in zipfolder
Itstep_file = 'Results/Itstep_beta_time_%ssims_beta%.3f_vax0.txt' %(numsims, beta)
Rtstep_file = 'Results/Rtstep_beta_time_%ssims_beta%.3f_vax0.txt' %(numsims, beta)
# recreate epidata from zip archive
d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt = perc.recreate_epidata(Itstep_file, Rtstep_file, zipname, beta, size_epi, ch, ad, d_epiincid, d_epiOR, d_epiresults, d_epiAR, d_epiOR_filt)
print beta, "processed", clock() - processing
beta_epi = list(set([key[0] for key in d_epiincid]))
for beta in beta_epi:
ORonly = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
# d_dummyalign_tstep[beta] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, beta, align_prop)
# TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# plot aligned data
# zip beta, episim number, and tstep for 5% cum-inf for sims where (beta, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[beta]):
plt.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
plt.plot(xrange(250), [1] * len(xrange(250)), marker = 'None', color = 'red', linewidth = 2)
plt.xlabel('epidemic time step, beta: ' + str(beta) + ', 5-95% cum infections')
plt.ylabel('OR, child:adult')
plt.ylim([0, 8])
plt.xlim([-1, 100])
print "OR only", beta, clock() - ORonly
# save plot
figname = 'Figures/epiORalign_beta_time_%ssims_beta%.3f_vax0.png' %(numsims, beta)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
for beta in beta_epi:
ORincid = clock()
# PROCESS X-AXIS: identify tstep at which sim reaches 5% of cum infections for the epidemic
# d_dummyalign_tstep[beta] = [5%cum-inf_tstep_sim1, 5%cum-inf_tstep_sim2..]
d_dummyalign_tstep, avg_align_tstep, dummyk = perc.define_epi_time(d_epiincid, beta, align_prop)
# TEST (11/19/13): realign plots for epitime to start at t = 0 by reassigning avg_align_tstep
avg_align_tstep = 0
# PROCESS YAX_AR:
# call upon d_epiAR dictionary
# dict_epiAR[(beta, simnumber, 'T', 'C' or 'A')] = [T, C or A attack rate at tstep 0, T, C or A attack rate at tstep 1...], where attack rate is number of new cases per 100 individuals
# plot data
# create two y-axes
fig, yax_OR = plt.subplots()
yax_AR = yax_OR.twinx()
# zip beta, episim number, and tstep for 5% cum-inf for sims where (beta, episim number) is the key for d_epiOR_filt
for k0, k1, t5 in zip((k[0] for k in dummyk), (k[1] for k in dummyk), d_dummyalign_tstep[beta]):
## OR y-axis
OR, = yax_OR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiOR_filt[(k0, k1)][t5:])), d_epiOR_filt[(k0, k1)][t5:], marker = 'None', color = 'grey')
## AR y-axis
child, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'C')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'C')][t5:]], marker = 'None', color = 'red')
adult, = yax_AR.plot(xrange(avg_align_tstep, avg_align_tstep+len(d_epiAR[(k0, k1, 'A')][t5:])), [AR * 100 for AR in d_epiAR[(k0, k1, 'A')][t5:]], marker = 'None', color = 'blue')
# plot settings
lines = [OR, child, adult]
yax_OR.legend(lines, ['Odds Ratio', 'Child Incidence', 'Adult Incidence'], loc = 'upper right')
yax_OR.set_ylabel('OR, child:adult')
yax_OR.set_ylim([0, 8])
yax_OR.set_xlim([-1, 100])
yax_OR.set_xlabel('epidemic time step, beta: ' + str(beta) + ', 5-95% cum infections')
yax_AR.set_ylabel('Incidence per 100')
yax_AR.set_ylim([0, 8])
print "ORincid", beta, clock() - ORonly
# save plot
figname = 'Figures/epiORincid_beta_time_%ssims_beta%.3f_vax0.png' %(numsims, beta)
plt.savefig(figname)
plt.close()
pp.compress_to_ziparchive(zipname, figname)
|
import sys, os, codecs, unicodedata, json
try:
    import amiga
except ImportError:
    amiga=None
try:
    import sl4a
except ImportError:
    sl4a=None
try:
    import posix
except ImportError:
    posix=None
if posix is not None:
pass
if amiga is not None:
os.chdir('/Projects/Perception-IME/Catalogs')
if sl4a is not None:
os.chdir('/storage/sdcard1/Workspace/Perception-IME/Catalogs')
workspace=os.getcwd()
kanamap = {
'A':'\u3042', 'I':'\u3044', 'U':'\u3046', 'E':'\u3048', 'O':'\u304A', 'N':'\u3093',
'KA':'\u304B','GA':'\u304C','KI':'\u304D','GI':'\u304E', 'KU':'\u304F','GU':'\u3050', 'KE':'\u3051','GE':'\u3052', 'KO':'\u3053', 'GO':'\u3054',
'SA':'\u3055','ZA':'\u3056','SHI':'\u3057',
'JA':'\u3058\u3083','JI':'\u3058','JU':'\u3058\u3085','JE':'\u3058\u3047','JO':'\u3058\u3087',
'SU':'\u3059','ZU':'\u305A','SE':'\u305B','ZE':'\u305C','SO':'\u305D', 'ZO':'\u305E',
'TA':'\u305F','DA':'\u3060','CHI':'\u3061','DI':'\u3062','TSU':'\u3064','DZU':'\u3065','TE':'\u3066','DE':'\u3067', 'TO':'\u3068','DO':'\u3069',
'NA':'\u306A','NI':'\u306B','NU':'\u306C','NE':'\u306D','NO':'\u306E',
'HA':'\u306F','BA':'\u3070','PA':'\u3071',
'HI':'\u3072','BI':'\u3073','PI':'\u3074',
'FU':'\u3075','BU':'\u3076','PU':'\u3077',
'HE':'\u3078','BE':'\u3079','PE':'\u307A',
'HO':'\u307B','BO':'\u307C','PO':'\u307D',
'MA':'\u307E','MI':'\u307F','MU':'\u3080','ME':'\u3081','MO':'\u3082',
'YA':'\u3084','YU':'\u3086','YO':'\u3088',
'RA':'\u3089','RI':'\u308A','RU':'\u308B','RE':'\u308C','RO':'\u308D',
'WA':'\u308F','WO':'\u3092',
'KYA':'\u304D\u3083','GYA':'\u304E\u3083','KYU':'\u304D\u3085','GYU':'\u304E\u3085','KYO':'\u304D\u3087','GYO':'\u304E\u3087',
'SHA':'\u3057\u3083','CHA':'\u3061\u3083','SHU':'\u3057\u3085','CHU':'\u3061\u3085','SHO':'\u3057\u3087','CHO':'\u3061\u3087',
'NYA':'\u306B\u3083','NYU':'\u306B\u3085','NYO':'\u306B\u3087',
'HYA':'\u306F\u3083','BYA':'\u3070\u3083','PYA':'\u3071\u3083',
'HYU':'\u306F\u3085','BYU':'\u3070\u3085','PYU':'\u3071\u3085',
'HYO':'\u306F\u3087','BYO':'\u3070\u3087','PYO':'\u3071\u3087',
'MYA':'\u307F\u3083','MYU':'\u307F\u3085','MYO':'\u307F\u3087',
'RYA':'\u308A\u3083','RYU':'\u308A\u3085','RYO':'\u308A\u3087',
}
katamap = kanamap # NOTE: no separate katakana table yet; katakana lookups reuse the hiragana map
class KanjiTree(object):
def __init__(self):
self.branches=dict()
self.kanji=str()
    def addchildpath(self, path, item):
        # Descend one branch per kana; once the whole path has been
        # consumed, record the glyph on the node that was reached.
        if len(path) > 0:
            local=path[0:1]
            cpath=path[1:]
            if local in self.branches:
                child=self.branches[local]
            else:
                child=KanjiTree()
            child.addchildpath(cpath, item)
            self.branches[local]=child
        else:
            self.kanji=self.kanji+' '+item
class KanjiTreeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj,KanjiTree):
return {obj.kanji: obj.branches}
return json.JSONEncoder.default(self, obj)
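# Brief illustration (the reading/glyph pair is hypothetical): a two-kana
# reading creates two nested branch levels, and the glyph is recorded on
# the node reached after the final kana:
#   demo = KanjiTree()
#   demo.addchildpath('\u304B\u306A', '\u4EBA')
#   json.dumps(demo, cls=KanjiTreeEncoder, ensure_ascii=False)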
kanji=KanjiTree()
Japanese=dict()
def parseChinese(glyph, key, vector):
return
def toHiragana(roma):
k=None
s=roma[0:1]
if s in kanamap:
k=kanamap[s]
l=1
s=roma[0:2]
if s in kanamap:
k=kanamap[s]
l=2
s=roma[0:3]
if s in kanamap:
k=kanamap[s]
l=3
s=roma[0:4]
if s in kanamap:
k=kanamap[s]
l=4
if k is not None:
rc=k+toHiragana(roma[l:])
else:
rc=""
return rc
def toKatakana(roma):
k=None
s=roma[0:1]
if s in katamap:
k=katamap[s]
l=1
s=roma[0:2]
if s in katamap:
k=katamap[s]
l=2
s=roma[0:3]
if s in katamap:
k=katamap[s]
l=3
s=roma[0:4]
if s in katamap:
k=katamap[s]
l=4
if k is not None:
rc=k+toKatakana(roma[l:])
else:
rc=""
return rc
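# Illustration: toHiragana('KYOTO') keeps the longest romaji prefix with a
# table entry ('KYO' -> '\u304D\u3087'), then recurses on the remainder
# ('TO' -> '\u3068'), so the whole string converts left to right.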
def parseJapanese(glyph, key, vector, jp=Japanese):
kana=None
reading=str()
for roma in vector:
kana=toHiragana(roma)
if kana is not None:
kanji.addchildpath(kana, glyph)
if glyph in jp:
jp[glyph]=jp[glyph]+'['+kana+']'
else:
jp[glyph]='['+kana+']'
def parseKorean(glyph, key, vector):
return
parsers = {
'kMandarin':parseChinese,
'kCantonese':parseChinese,
'kJapaneseOn':parseJapanese,
'kJapaneseKun':parseJapanese,
'kKorean':parseKorean
}
def parseUnihan(vector):
if len(vector) != 0:
if vector[0].startswith('U+'):
glyph = chr(int(vector[0][2:], 16))
kword = vector[1]
vargs = vector[2:]
if kword in parsers:
parsers[kword](glyph, kword, vargs)
for text in open('Unihan_Readings.txt', 'r', encoding='utf-8').read().split('\n'):
parseUnihan(text.split())
fh = open('JapaneseReadings.atxt','w')
try:
    json.dump(kanji,fp=fh,ensure_ascii=True,indent=4,sort_keys=True,cls=KanjiTreeEncoder)
finally:
    fh.close()
fh = open('JapaneseReadings.utxt','w', encoding='utf-8')
try:
    json.dump(kanji,fp=fh,ensure_ascii=False,indent=4,sort_keys=True,cls=KanjiTreeEncoder)
finally:
    fh.close()
|
from django.utils.decorators import method_decorator
from stronghold.decorators import public
class StrongholdPublicMixin(object):
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(StrongholdPublicMixin, self).dispatch(*args, **kwargs)
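# A hypothetical usage sketch (view and template names are illustrative):
# mixing StrongholdPublicMixin into a class-based view exempts its
# dispatch() from stronghold's login-required middleware.
#
#   from django.views.generic import TemplateView
#
#   class LandingPageView(StrongholdPublicMixin, TemplateView):
#       template_name = 'landing.html'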
|
class BehaviourControl(object):
def __init__(self, bot):
self.bot = bot
self.behaviours = {}
self.loadedBehaviours = {}
    def add(self, behaviourName, behaviour):
        self.behaviours[behaviourName] = behaviour
        return behaviour
def remove(self, behaviourName):
return self.behaviours.pop(behaviourName)
    def hasBehaviour(self, behaviourName):
        return behaviourName in self.behaviours
def get(self, behaviourName):
return self.behaviours.get(behaviourName)
def getStatus(self, behaviourName):
if not self.hasBehaviour(behaviourName):
return 'unknown'
elif behaviourName in self.loadedBehaviours:
return 'loaded'
else:
return 'unloaded'
def load(self, behaviourName):
behaviour = self.behaviours[behaviourName]
behaviour.load()
self.loadedBehaviours[behaviourName] = behaviour
return behaviour
def unload(self, behaviourName):
self.loadedBehaviours[behaviourName].unload()
return self.loadedBehaviours.pop(behaviourName)
    def loadAll(self):
        for behaviourName in self.behaviours:
            if behaviourName not in self.loadedBehaviours:
                self.load(behaviourName)
    def unloadAll(self):
        # Unload in the reverse of the recorded load order.
        for behaviourName in reversed(list(self.loadedBehaviours)):
            self.unload(behaviourName)
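# A minimal usage sketch (EchoBehaviour is hypothetical; any object with
# load()/unload() methods will do):
if __name__ == '__main__':
    class EchoBehaviour(object):
        def load(self):
            print('loaded')
        def unload(self):
            print('unloaded')
    control = BehaviourControl(bot=None)
    control.add('echo', EchoBehaviour())
    control.loadAll()
    assert control.getStatus('echo') == 'loaded'
    control.unloadAll()
    assert control.getStatus('echo') == 'unloaded'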
|
import os
import sys
import djblets
_ = lambda s: s
DEBUG = True
ADMINS = (
    ('Example Joe', 'admin@example.com'),
)
MANAGERS = ADMINS
USE_TZ = True
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
EMAIL_SUBJECT_PREFIX = "[Review Board] "
USE_I18N = False
LANGUAGES = (
('en', _('English')),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'djblets.extensions.loaders.load_template_source',
)
MIDDLEWARE_CLASSES = [
# Keep these first, in order
'django.middleware.gzip.GZipMiddleware',
'reviewboard.admin.middleware.InitReviewBoardMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# These must go before anything that deals with settings.
'djblets.siteconfig.middleware.SettingsMiddleware',
'reviewboard.admin.middleware.LoadSettingsMiddleware',
'djblets.extensions.middleware.ExtensionsMiddleware',
'djblets.log.middleware.LoggingMiddleware',
'reviewboard.accounts.middleware.TimezoneMiddleware',
'reviewboard.admin.middleware.CheckUpdatesRequiredMiddleware',
'reviewboard.admin.middleware.X509AuthMiddleware',
'reviewboard.site.middleware.LocalSiteMiddleware',
]
RB_EXTRA_MIDDLEWARE_CLASSES = []
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.core.context_processors.static',
'djblets.siteconfig.context_processors.siteconfig',
'djblets.util.context_processors.settingsVars',
'djblets.util.context_processors.siteRoot',
'djblets.util.context_processors.ajaxSerial',
'djblets.util.context_processors.mediaSerial',
'reviewboard.accounts.context_processors.auth_backends',
'reviewboard.admin.context_processors.version',
'reviewboard.site.context_processors.localsite',
)
SITE_ROOT_URLCONF = 'reviewboard.urls'
ROOT_URLCONF = 'djblets.util.rooturl'
REVIEWBOARD_ROOT = os.path.abspath(os.path.split(__file__)[0])
SITE_ROOT = '/'
TEMPLATE_DIRS = (
# Don't forget to use absolute paths, not relative paths.
os.path.join(REVIEWBOARD_ROOT, 'templates'),
)
STATICFILES_DIRS = (
('rb', os.path.join(REVIEWBOARD_ROOT, 'static', 'rb')),
('djblets', os.path.join(os.path.dirname(djblets.__file__), 'media')),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
RB_BUILTIN_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.markup',
'django.contrib.sites',
'django.contrib.sessions',
'django.contrib.staticfiles',
'djblets.datagrid',
'djblets.extensions',
'djblets.feedview',
'djblets.gravatars',
'djblets.log',
'djblets.pipeline',
'djblets.siteconfig',
'djblets.util',
'djblets.webapi',
'pipeline', # Must be after djblets.pipeline
'reviewboard.accounts',
'reviewboard.admin',
'reviewboard.attachments',
'reviewboard.changedescs',
'reviewboard.diffviewer',
'reviewboard.extensions',
'reviewboard.hostingsvcs',
'reviewboard.notifications',
'reviewboard.reviews',
'reviewboard.scmtools',
'reviewboard.site',
'reviewboard.webapi',
]
RB_EXTRA_APPS = []
WEB_API_ENCODERS = (
'djblets.webapi.encoders.ResourceAPIEncoder',
)
LOGGING_NAME = "reviewboard"
AUTH_PROFILE_MODULE = "accounts.Profile"
CACHE_EXPIRATION_TIME = 60 * 60 * 24 * 30 # 1 month
TEST_RUNNER = 'reviewboard.test.RBTestRunner'
install_help = '''
Please see http://www.reviewboard.org/docs/manual/dev/admin/
for help setting up Review Board.
'''
def dependency_error(string):
sys.stderr.write('%s\n' % string)
sys.stderr.write(install_help)
sys.exit(1)
if os.path.split(os.path.dirname(__file__))[1] != 'reviewboard':
dependency_error('The directory containing manage.py must be named "reviewboard"')
LOCAL_ROOT = None
PRODUCTION = True
try:
import settings_local
from settings_local import *
except ImportError, exc:
dependency_error('Unable to import settings_local.py: %s' % exc)
INSTALLED_APPS = RB_BUILTIN_APPS + RB_EXTRA_APPS + ['django_evolution']
MIDDLEWARE_CLASSES += RB_EXTRA_MIDDLEWARE_CLASSES
TEMPLATE_DEBUG = DEBUG
if not LOCAL_ROOT:
local_dir = os.path.dirname(settings_local.__file__)
if os.path.exists(os.path.join(local_dir, 'reviewboard')):
# reviewboard/ is in the same directory as settings_local.py.
# This is probably a Git checkout.
LOCAL_ROOT = os.path.join(local_dir, 'reviewboard')
PRODUCTION = False
else:
# This is likely a site install. Get the parent directory.
LOCAL_ROOT = os.path.dirname(local_dir)
HTDOCS_ROOT = os.path.join(LOCAL_ROOT, 'htdocs')
STATIC_ROOT = os.path.join(HTDOCS_ROOT, 'static')
MEDIA_ROOT = os.path.join(HTDOCS_ROOT, 'media')
EXTENSIONS_STATIC_ROOT = os.path.join(MEDIA_ROOT, 'ext')
ADMIN_MEDIA_ROOT = os.path.join(STATIC_ROOT, 'admin')
STATIC_URL = getattr(settings_local, 'STATIC_URL', SITE_ROOT + 'static/')
MEDIA_URL = getattr(settings_local, 'MEDIA_URL', SITE_ROOT + 'media/')
LOGIN_URL = SITE_ROOT + 'account/login/'
LANGUAGE_COOKIE_NAME = "rblanguage"
SESSION_COOKIE_NAME = "rbsessionid"
SESSION_COOKIE_AGE = 365 * 24 * 60 * 60 # 1 year
SESSION_COOKIE_PATH = SITE_ROOT
PIPELINE_JS = {
'common': {
'source_filenames': (
'rb/js/jquery.form.js',
'rb/js/ui.autocomplete.js',
'rb/js/common.js',
'rb/js/datastore.js',
),
'output_filename': 'rb/js/base.min.js',
},
'reviews': {
'source_filenames': (
'rb/js/diffviewer.js',
'rb/js/reviews.js',
'rb/js/screenshots.js',
),
'output_filename': 'rb/js/reviews.min.js',
},
'admin': {
'source_filenames': (
'rb/js/flot/jquery.flot.min.js',
'rb/js/flot/jquery.flot.pie.min.js',
'rb/js/flot/jquery.flot.selection.min.js',
'rb/js/jquery.masonry.js',
'rb/js/admin.js',
),
'output_filename': 'rb/js/admin.min.js',
},
'repositoryform': {
'source_filenames': (
'rb/js/repositoryform.js',
),
'output_filename': 'rb/js/repositoryform.min.js',
},
}
PIPELINE_CSS = {
'common': {
'source_filenames': (
'rb/css/common.less',
'rb/css/dashboard.less',
'rb/css/search.less',
),
'output_filename': 'rb/css/common.min.css',
'absolute_paths': False,
},
'reviews': {
'source_filenames': (
'rb/css/diffviewer.less',
'rb/css/reviews.less',
'rb/css/syntax.css',
),
'output_filename': 'rb/css/reviews.min.css',
'absolute_paths': False,
},
'admin': {
'source_filenames': (
'rb/css/admin.less',
'rb/css/admin-dashboard.less',
),
'output_filename': 'rb/css/admin.min.css',
'absolute_paths': False,
},
}
BLESS_IMPORT_PATHS = ('rb/css/',)
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.jsmin.JSMinCompressor'
if PRODUCTION or not DEBUG or os.getenv('FORCE_BUILD_MEDIA', ''):
PIPELINE_COMPILERS = ['djblets.pipeline.compilers.bless.BlessCompiler']
PIPELINE = True
elif DEBUG:
PIPELINE_COMPILERS = []
PIPELINE = False
TEST_PACKAGES = ['reviewboard']
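# A hedged sketch of the settings_local.py this file imports (names are
# assumptions inferred from how they are used above, not shipped code):
# DEBUG = True
# RB_EXTRA_MIDDLEWARE_CLASSES = []
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'reviewboard.db',
#     },
# }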
|
import json
import re
class myobject:
def __init__(self, coins, value):
self.coins = coins
self.value = value
myList = []
total = 0
file = open("devices.txt", "r")
txt = file.readline()
while txt != "":
# Read the rest of the record
txt = file.readline()
re1='.*?' # Non-greedy match on filler
re2='(?:[a-z][a-z]*[0-9]+[a-z0-9]*)' # Uninteresting: alphanum
re3='.*?' # Non-greedy match on filler
re4='(?:[a-z][a-z]*[0-9]+[a-z0-9]*)' # Uninteresting: alphanum
re5='.*?' # Non-greedy match on filler
re6='((?:[a-z][a-z]*[0-9]+[a-z0-9]*))' # Alphanum 1
rg = re.compile(re1+re2+re3+re4+re5+re6,re.IGNORECASE|re.DOTALL)
m = rg.search(txt)
if m:
alphanum1=m.group(1)
print "("+alphanum1+")"+"\n"
# Finally, finish the loop by reading the first line of the next record to
# set up for the next iteration of the loop.
txt = file.readline()
file.close()
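# For reference (not in the original script): the concatenated pattern
# above is equivalent to the single regex
#   .*?(?:[a-z][a-z]*[0-9]+[a-z0-9]*).*?(?:[a-z][a-z]*[0-9]+[a-z0-9]*).*?((?:[a-z][a-z]*[0-9]+[a-z0-9]*))
# i.e. it skips past the first two alphanumeric tokens on the line and
# captures the third one.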
|
r"""
This script implements the 1st method and outputs the required transmission rate.
"""
import numpy as np
import itertools
import copy
class FirstRate(object):
r"""
INPUT:
- ''demands_sender'' -- the [K*J] matrix: which user's requirement can be
fulfilled by which sender
- '' t '' -- int(M*K/ I)
OUTPUT:
- ''.R_max'' -- the maximum required transmission rate of senders (by 2nd method)
- ''.R_min'' -- the minimum required transmission rate of senders
- ''.r_max'' -- the maximum required transmission rate through links (by 2nd method)
- ''.r_min'' -- the minimum required transmission rate through links (except for 0)
- ''.user_sender_packets'' -- this matrix tells each user gets how many packets
from each sender, i.e., the required transmission
rate through each link
- ''.real_user_subset_demands_sender'' -- this dictionary tells each user
gets file from which sender during
different delivery_task
- ''.sender_packet'' -- the vector tells the required transmission rate of senders
"""
def __init__(self, demands_sender, t):
self.__demands_sender = np.array(demands_sender)
self.__t = t
self.R_max = 0
self.R_min = 0
self.r_max = 0
self.r_min = 0
def required_rate(self):
K = self.__demands_sender.shape[0]
J = self.__demands_sender.shape[1]
user_subset_demands_sender = []
#to track every delivery task and its relevant capable senders
cut_value = []
#to track every delivery task will be split into how many smaller pieces
S = itertools.combinations(range(K), self.__t+1)
user_subsets = [f for f in S]
for one_user_subset in user_subsets:
one_user_subset_demands_sender = np.ones(np.shape(self.__demands_sender), dtype = np.int)*2
for one_user in one_user_subset:
one_user_subset_demands_sender[one_user] = copy.deepcopy(self.__demands_sender[one_user])
user_subset_demands_sender.append(one_user_subset_demands_sender)
cut_value.append(np.min(np.sum(one_user_subset_demands_sender, axis=1)))
for delivery_task in range(len(user_subset_demands_sender)):
for one_user in user_subsets[delivery_task]:
cut_recorder = 0
for one_sender in range(len(user_subset_demands_sender[delivery_task][one_user])):
cut_recorder = cut_recorder + user_subset_demands_sender[delivery_task][one_user][one_sender]
if cut_recorder > cut_value[delivery_task]:
user_subset_demands_sender[delivery_task][one_user][one_sender] = 0
self.real_user_subset_demands_sender = []
for delivery_task in range(len(user_subsets)):
one_user_subset_demands_sender = np.zeros(np.shape(self.__demands_sender), dtype = np.int)
for one_user in user_subsets[delivery_task]:
one_user_subset_demands_sender[one_user] = copy.deepcopy(user_subset_demands_sender[delivery_task][one_user])
self.real_user_subset_demands_sender.append(one_user_subset_demands_sender)
assignment_result = [] # to track which sender participates in each delivery_task
for delivery_task in range(len(self.real_user_subset_demands_sender)):
assignment_result_lang = np.zeros(J)
for one_user in range(len(self.real_user_subset_demands_sender[delivery_task])):
for one_sender in range(len(self.real_user_subset_demands_sender[delivery_task][one_user])):
if self.real_user_subset_demands_sender[delivery_task][one_user][one_sender] == 1:
                        assignment_result_lang[one_sender] = 1.0 / cut_value[delivery_task]  # float division (avoids Python 2 floor division)
assignment_result.append(assignment_result_lang)
self.sender_packet = np.zeros(J) # to calculate R
for rate_single_delivery_task in assignment_result:
self.sender_packet = self.sender_packet + rate_single_delivery_task
#################################################################
self.R_max = self.sender_packet.max()
self.R_min = self.sender_packet.min()
#################################################################
self.user_sender_packets = np.zeros(np.shape(self.__demands_sender), dtype = np.int) # to calculate r
for delivery_task in range(len(self.real_user_subset_demands_sender)):
            self.real_user_subset_demands_sender[delivery_task] = self.real_user_subset_demands_sender[delivery_task] / float(cut_value[delivery_task])
self.user_sender_packets = self.user_sender_packets + self.real_user_subset_demands_sender[delivery_task]
######################################################################
self.r_max = self.user_sender_packets.max()
        min_recorder = float('inf')
for i in self.user_sender_packets:
for j in i:
if j != 0:
min_recorder = min(min_recorder, j)
self.r_min = min_recorder
######################################################################
return([self.R_max, self.r_max])
if __name__ == "__main__":
demands_sender = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
t = 1
a = FirstRate(demands_sender, t)
b = a.required_rate()
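    # A minimal inspection sketch (not in the original script):
    # required_rate() returns [R_max, r_max]; the remaining statistics
    # are exposed as attributes after the call.
    print('[R_max, r_max] = ' + str(b))
    print('per-sender rates: ' + str(a.sender_packet))
    print('per-link rates:\n' + str(a.user_sender_packets))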
|
from setuptools import setup
from codecs import open  # Following PyPUG advice; not necessary in Python 3.x
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='dreamhostapi',
version='0.1.0',
description='A Python wrapper around DreamHost\'s API',
long_description=long_description,
url='https://github.com/mcgid/python-dreamhostapi',
author='mcgid',
author_email='dan@mcgid.ca',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Untested on any other Python version
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='DreamHost API interaction wrapper',
packages=['dreamhostapi'],
install_requires=['requests'],
)
|
import json
from six.moves import urllib_parse as urlparse
from cachebrowser.bootstrap import bootstrapper, BootstrapError
from cachebrowser.common import extract_url_hostname
from cachebrowser.models import Host, DoesNotExist
from cachebrowser.network import HttpConnectionHandler
from cachebrowser import http
from cachebrowser import common
class ResponseOptions(object):
def __init__(self, send_json=True, content_type=None, status=200, reason='OK'):
self.send_json = send_json
if content_type is not None:
self.content_type = content_type
else:
self.content_type = 'application/json' if self.send_json else 'text/plain'
self.status = status
self.reason = reason or {
200: 'OK',
404: 'Not Found',
403: 'Forbidden'
}.get(self.status, '')
class BaseAPIHandler(HttpConnectionHandler):
    def __init__(self, *args, **kwargs):
        super(BaseAPIHandler, self).__init__(*args, **kwargs)
        self.method_handlers = {}
        self.send_json = True
@common.silent_fail(log=True)
def on_request(self, env, start_response):
method = env['REQUEST_METHOD'].upper()
path = env['PATH_INFO'].lower().strip('/')
if method not in self.method_handlers or path not in self.method_handlers[method]:
start_response('404 Not Found', [])
return
if method == 'GET':
request = urlparse.parse_qs(env.get('QUERY_STRING', ''), True)
for query in request:
if len(request[query]) == 1:
request[query] = request[query][0]
else:
            request = json.load(env['wsgi.input']) if 'wsgi.input' in env else {}
response = self.method_handlers[method][path](request)
        if isinstance(response, tuple):
response, options = response
else:
options = ResponseOptions()
start_response('%d %s' % (options.status, options.reason), [('Content-Type', options.content_type)])
if options.send_json:
yield json.dumps(response)
else:
yield response
def register_api(self, method, path, handler):
method = method.upper()
path = path.lower().strip('/')
if method not in self.method_handlers:
self.method_handlers[method] = {}
self.method_handlers[method][path] = handler
class APIHandler(BaseAPIHandler):
def __init__(self, *args, **kwargs):
super(APIHandler, self).__init__(*args, **kwargs)
self.register_api('PUT', '/host/bootstrap', self.action_add_host)
self.register_api('GET', '/host/check', self.action_check_host)
self.register_api('GET', '/cachebrowse', self.action_get)
@staticmethod
def action_add_host(request):
try:
host = bootstrapper.bootstrap(request['host'])
return {
'result': 'success',
'host': host.hostname
}
except BootstrapError as e:
return {
'result': 'fail',
'error': e.message
}
@staticmethod
def action_check_host(request):
hostname = extract_url_hostname(request['host'])
try:
is_active = Host.get(Host.hostname == hostname).is_active
except DoesNotExist:
is_active = False
return {
'result': 'active' if is_active else 'inactive',
'host': request['host']
}
@staticmethod
def action_get(request):
keys = ['url', 'target', 'method', 'scheme', 'port']
kwargs = {k: request[k] for k in keys if k in request}
response = http.request(**kwargs)
if request.get('json', False):
response_message = {
'status': response.status,
'reason': response.reason
}
if request.get('headers', True):
response_message['headers'] = {k: v for (k, v) in response.getheaders()}
if request.get('raw', False):
response_message['raw'] = response.read(raw=True)
elif request.get('body', True):
response_message['body'] = response.read()
return response_message
else:
if request.get('raw', False):
return response.get_raw(), ResponseOptions(send_json=False)
else:
return response.body, ResponseOptions(send_json=False)
|
import numpy
n, m = map(int, input().split())
items = []
for _ in range(n):
items.append(
list(
map(
int,
input().split()
)
)
)
print(numpy.mean(items, axis=1))
print(numpy.var(items, axis=0))
print(numpy.std(items, axis=None))
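# A worked example of the expected I/O (sample values, not from the
# original): for stdin "2 2\n1 2\n3 4\n" this prints the row means
# [1.5 3.5], the column variances [1. 1.], and the standard deviation
# of the flattened array (~1.118).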
|
import os
import unittest
from pip.compat import stdlib_pkgs
from reqpy.main import get_excludes, check_file, parse_args
from reqpy.main import get_inherited_excludes
def collector():
dirname = os.path.dirname
start_dir = dirname(dirname(dirname(os.path.abspath(__file__))))
return unittest.defaultTestLoader.discover(start_dir)
class TestApp(unittest.TestCase):
def test_excludes_list(self):
freeze_excludes = list(stdlib_pkgs) + ['setuptools', 'pip', 'distribute']
freeze_excludes += ['wheel', 'rope', 'jedi', 'importmagic']
path = os.path.join(os.getcwd(), 'reqpy', 'tests', 'reqpy.txt')
self.assertEqual(freeze_excludes, get_excludes(path))
def test_check_file(self):
name = __file__
self.assertEqual(name, check_file(name))
with self.assertRaises(OSError):
check_file('fake_file')
def test_inheritance_excludes_list_with_no_more_inheritance(self):
freeze_excludes = ['wheel', 'rope', 'jedi', 'importmagic']
path = os.path.join(os.getcwd(), 'reqpy', 'tests', 'reqpy.txt')
self.assertEqual(freeze_excludes, get_inherited_excludes(path))
def test_inheritance_excludes_list_with_one_inheritance(self):
freeze_excludes = ['wheel', 'rope', 'jedi', 'importmagic']
freeze_excludes += ['one', 'two', 'three']
path = os.path.join(os.getcwd(), 'reqpy', 'tests', '1.txt')
self.assertEqual(freeze_excludes, get_inherited_excludes(path))
def test_inheritance_excludes_list_with_two_inheritance(self):
freeze_excludes = ['wheel', 'rope', 'jedi', 'importmagic']
freeze_excludes += ['one', 'two', 'three']
freeze_excludes += ['four', 'five', 'six']
path = os.path.join(os.getcwd(), 'reqpy', 'tests', '2.txt')
self.assertEqual(freeze_excludes, get_inherited_excludes(path))
    def test_parse_args_with_no_args(self):
        with self.assertRaises(SystemExit):
            parse_args([])
def test_parse_args_with_only_file(self):
        name = 'requirements.txt'
args = parse_args([name])
self.assertEqual(name, args.file)
self.assertEqual(True, args.g)
self.assertEqual(None, args.i)
def test_parse_args_with_file_and_g_flag(self):
        name = 'requirements.txt'
args = parse_args(['-g', name])
self.assertEqual(name, args.file)
self.assertEqual(False, args.g)
self.assertEqual(None, args.i)
def test_parse_args_with_file_and_inherit(self):
        name = 'requirements.txt'
inherit = __file__
args = parse_args(['-i', inherit, name])
self.assertEqual(name, args.file)
self.assertEqual(True, args.g)
self.assertEqual(inherit, args.i)
# Tests for non-existing file
inherit = 'fake'
with self.assertRaises(OSError):
parse_args(['-i', inherit, name])
def test_parse_args_with_file_and_g_flag_and_inherit(self):
        name = 'requirements.txt'
inherit = __file__
args = parse_args(['-i', inherit, '-g', name])
self.assertEqual(name, args.file)
self.assertEqual(False, args.g)
self.assertEqual(inherit, args.i)
if __name__ == '__main__':
unittest.main()
|
print("importing packages")
from keras.models import Sequential
from keras.layers import Dense, Activation
import keras.utils.visualize_util as keras_vis
from mnist import MNIST
import pdb
import numpy as np
from matplotlib import pyplot as plt
print("configuring script")
EXMNISTIMG = './example_mnist.png'
MNISTDATA = '/Users/eric/Projects/neural_net/python-mnist/data'
GRAPHOUT = './basic_dot_model.png'
model = Sequential()
print("initializing sequential model")
model.add(Dense(output_dim = 100, input_dim = 784))
model.add(Activation("relu"))
model.add(Dense(output_dim = 64))
model.add(Activation("relu"))
model.add(Dense(output_dim = 10))
model.add(Activation("softmax"))
print("plotting basic model outline to {}".format(GRAPHOUT))
keras_vis.plot(model, to_file = GRAPHOUT)
print("compiling the net")
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy']
)
print("loading MNIST data")
mndata = MNIST(MNISTDATA)
mndata.load_training()
mndata.load_testing()
print("MNIST data loaded")
print("writing ex training image to {}".format(EXMNISTIMG))
fig = plt.figure()
imgplot = plt.imshow(np.array(mndata.train_images[0]).reshape([28, 28]))
imgplot.set_cmap('gray')
plt.savefig(EXMNISTIMG)
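# A hedged training sketch (not in the original script), assuming the
# Keras 1.x API used above; `nb_epoch` was later renamed `epochs`:
# from keras.utils.np_utils import to_categorical
# X_train = np.array(mndata.train_images) / 255.0
# y_train = to_categorical(np.array(mndata.train_labels), 10)
# model.fit(X_train, y_train, nb_epoch=5, batch_size=32)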
|
from dataactvalidator.app import createApp
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models import lookups
from dataactcore.models.jobModels import JobStatus, JobType, FileType, PublishStatus
def setupJobTrackerDB():
"""Create job tracker tables from model metadata."""
with createApp().app_context():
sess = GlobalDB.db().session
insertCodes(sess)
sess.commit()
def insertCodes(sess):
    """Insert the static lookup codes (job status, job type, publish status, file type)."""
# TODO: define these codes as enums in the data model?
# insert status types
for s in lookups.JOB_STATUS:
status = JobStatus(job_status_id=s.id, name=s.name, description=s.desc)
sess.merge(status)
# insert job types
for t in lookups.JOB_TYPE:
thisType = JobType(job_type_id=t.id, name=t.name, description=t.desc)
sess.merge(thisType)
# insert publish status
for ps in lookups.PUBLISH_STATUS:
status = PublishStatus(publish_status_id=ps.id, name=ps.name, description=ps.desc)
sess.merge(status)
# insert file types
for ft in lookups.FILE_TYPE:
fileType = FileType(file_type_id=ft.id, name=ft.name, description=ft.desc, letter_name=ft.letter)
sess.merge(fileType)
if __name__ == '__main__':
setupJobTrackerDB()
|
from django.conf.urls import url
from eguard.views import dealAppoint
urlpatterns = [
url(r'^$', dealAppoint, name='dealAppoint'),
]
|
import logging
import argparse
from sqlalchemy import and_, or_
import pandas as pd
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactvalidator.health_check import create_app
from dataactbroker.helpers.generic_helper import batch
from dataactcore.utils.duns import update_duns_props, update_duns, LOAD_BATCH_SIZE
from dataactcore.models.domainModels import DUNS
logger = logging.getLogger(__name__)
def backfill_uei_via_entity_api(sess, table):
""" Backfill any extraneous data (ex. uei) missing from V1 data that wasn't updated by V2
Args:
sess: database connection
table: table to backfill
"""
    duns_to_update = sess.query(table.awardee_or_recipient_uniqu).filter(
        or_(table.uei.is_(None), and_(table.ultimate_parent_unique_ide.isnot(None),
                                      table.ultimate_parent_uei.is_(None)))).all()
for duns_batch in batch(duns_to_update, LOAD_BATCH_SIZE):
        df = pd.DataFrame(duns_batch, columns=['awardee_or_recipient_uniqu'])
df = update_duns_props(df)
df = df[['awardee_or_recipient_uniqu', 'uei', 'ultimate_parent_uei']]
update_duns(sess, df, table_name=table.__table__.name)
def backfill_uei_crosswalk(sess, table_name):
""" Backfill any extraneous data (ex. uei) missing from V1 data that wasn't updated by V2
Args:
sess: database connection
table_name: table to backfill
"""
blank_uei_query = """
SELECT awardee_or_recipient_uniqu
FROM {table_name}
WHERE uei IS NULL;
""".format(table_name=table_name)
duns_to_update = [row['awardee_or_recipient_uniqu'] for row in sess.execute(blank_uei_query).fetchall()]
for duns_batch in batch(duns_to_update, LOAD_BATCH_SIZE):
df = pd.DataFrame()
df['awardee_or_recipient_uniqu'] = duns_batch
df = update_duns_props(df, api='iqaas')
df = df[['awardee_or_recipient_uniqu', 'uei']]
update_duns(sess, df, table_name=table_name)
if __name__ == '__main__':
configure_logging()
parser = argparse.ArgumentParser(description='Get data from SAM and backfill uei')
parser.add_argument("-m", "--method", choices=['duns', 'crosswalk'], default='crosswalk',
help='Select method of backfilling (duns table, uei crosswalk table)')
parser.add_argument("-ct", "--crosswalk_table", default='uei-crosswalk',
help='Name of the crosswalk table to backfill')
args = parser.parse_args()
method = args.method
crosswalk_table = args.crosswalk_table
with create_app().app_context():
sess = GlobalDB.db().session
affected = 0
if method == 'duns':
logger.info('Backfilling empty uei and ultimate_parent_uei in the DUNS table using the entity API.')
backfill_uei_via_entity_api(sess, DUNS)
else:
logger.info('Backfilling {} using the IQaaS API.'.format(crosswalk_table))
backfill_uei_crosswalk(sess, table_name=crosswalk_table)
logger.info('Backfill completed')
sess.close()
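# A hedged invocation sketch (flag names come from the argparse setup
# above; the script filename and table name are assumptions):
#   python backfill_uei.py -m duns
#   python backfill_uei.py -m crosswalk -ct uei-crosswalk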
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1008120003.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
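# A hedged usage sketch (not part of the original loader): when libsbml
# is available, `sbml` is an SBMLDocument whose model can be inspected:
# if module_exists('libsbml'):
#     print(sbml.getModel().getId())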
|
'''
Created on Jul 14, 2014
@author: anroco
How does the format method of a Python str work?
'''
s = 'Today is {}, {}'.format('Jul 14', '2014')
print(s)
s = '{2}, {0} and {1}'.format('first', 'second', 'third')
print(s)
s = 'vowels are: {0}, {1}, {2}, {3} and {4}'.format(*'aeiou')
print(s)
s = '{0}, {1}, {0}, {1}.'.format('red lorry', 'yellow lorry')
print(s)
s = 'Tom eats {food} in {site}'.format(food='hamburger', site='McDonalds')
print(s)
d = {'dog': 'Buz', 'cat': 'Darco'}
s = 'My pets are {dog} and {cat}'.format(**d)
print(s)
s = '{name} gets in spanish {0} and {1} in social '.format(4, 5, name='Clark')
print(s)
|
from Xlib import X, XK, display
from Xlib.ext import record
from Xlib.protocol import rq
class KeyEvents(object):
def __init__(self, key_pressed_callback):
self.local_dpy = display.Display(':0')
self.record_dpy = display.Display(':0')
self.key_pressed_callback = key_pressed_callback
self.exit_flag = True
def record_callback(self, reply):
if reply.category != record.FromServer:
return
if reply.client_swapped:
return
if not len(reply.data) or (reply.data[0]) < 2:
# not an event
return
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.record_dpy.display, None, None)
if event.type in [X.KeyPress, X.KeyRelease]:
self.key_pressed_callback()
def start(self):
r = self.record_dpy.record_get_version(0, 0)
        # Create a recording context; we only want key press events
ctx = self.record_dpy.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.KeyPress),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
# Enable the context; this only returns after a call to record_disable_context,
# while calling the callback function in the meantime
self.record_dpy.record_enable_context(ctx, self.record_callback)
# Finally free the context
self.record_dpy.record_free_context(ctx)
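# A minimal usage sketch (assumes an X server with the RECORD extension
# on display :0; the callback is illustrative):
# if __name__ == '__main__':
#     def on_key():
#         print('key event')
#     KeyEvents(on_key).start()  # blocks until record_disable_context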
|
import os
import sys
import time
import glob
import misopy
from misopy.settings import Settings
from misopy.settings import miso_path as miso_settings_path
import misopy.hypothesis_test as ht
import misopy.as_events as as_events
import misopy.cluster_utils as cluster_utils
import misopy.sam_utils as sam_utils
import misopy.miso_sampler as miso
import misopy.Gene as gene_utils
import misopy.gff_utils as gff_utils
import misopy.misc_utils as misc_utils
import misopy.samples_utils as samples_utils
from misopy.parse_csv import *
import numpy as np
np.seterr(all='ignore')
miso_path = os.path.dirname(os.path.abspath(__file__))
def greeting(parser=None):
print "MISO (Mixture of Isoforms model)"
print "Summarize MISO output to get Psi values and confidence intervals."
print "Use --help argument to view options.\n"
if parser is not None:
parser.print_help()
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--summarize-samples", dest="summarize_samples",
nargs=2, default=None,
help="Compute summary statistics of the given set "
"of samples. Expects a directory with MISO output "
"and a directory to output summary file to.")
parser.add_option("--summary-label", dest="summary_label",
nargs=1, default=None,
help="Label for MISO summary file. If not given, "
"uses basename of MISO output directory.")
parser.add_option("--use-compressed", dest="use_compressed",
nargs=1, default=None,
help="Use compressed event IDs. Takes as input a "
"genes_to_filenames.shelve file produced by the "
"index_gff script.")
(options, args) = parser.parse_args()
greeting()
use_compressed = None
if options.use_compressed is not None:
use_compressed = \
os.path.abspath(os.path.expanduser(options.use_compressed))
if not os.path.exists(use_compressed):
print "Error: mapping filename from event IDs to compressed IDs %s " \
"is not found." %(use_compressed)
sys.exit(1)
else:
print "Compression being used."
##
## Summarizing samples
##
if options.summarize_samples:
samples_dir = \
os.path.abspath(os.path.expanduser(options.summarize_samples[0]))
        if options.summary_label is not None:
samples_label = options.summary_label
print "Using summary label: %s" %(samples_label)
else:
samples_label = \
os.path.basename(os.path.expanduser(samples_dir))
assert(len(samples_label) >= 1)
summary_output_dir = \
os.path.abspath(os.path.join(os.path.expanduser(options.summarize_samples[1]),
'summary'))
if not os.path.isdir(summary_output_dir):
misc_utils.make_dir(summary_output_dir)
summary_filename = os.path.join(summary_output_dir,
'%s.miso_summary' %(samples_label))
samples_utils.summarize_sampler_results(samples_dir,
summary_filename,
use_compressed=use_compressed)
if __name__ == "__main__":
main()
|
from borrar import *
from web import form
import os.path
from time import time
formulariop = form.Form(
form.Textbox("nombre", description = "Nombre:", value=""),
form.Textarea("sugerencia", description = "Haz una sugerencia para la asignatura de IV:", value=""),
form.Button("Enviar"),
)
class Formulario:
def GET(self):
form = formulariop()
html = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>PON TUS SUGERENCIAS PARA IV</title>
<link rel="stylesheet" href="static/comun.css">
<link rel="stylesheet" href="static/estilo.css">
<link rel="stylesheet" href="static/formulario.css">
</head>
<body>
<h1>Formularios Avanzados</h1>
<p>Tu nombre en .</p>
<form method="POST">
%s
</form>
</body>
</html>""" % (form.render())
return html
def POST(self):
form = formulariop()
form.validates()
f = open ("sugerencias.txt", "a")
f.write("%s : \n" % (form.d.nombre))
f.write("%s \n\n --- \n" % (form.d.sugerencia))
f.close()
f=open("sugerencias.txt")
texto = f.read()
f.close()
html = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Avanzado</title>
<link rel="stylesheet" href="static/comun.css">
<link rel="stylesheet" href="static/moonchild.css">
<link rel="stylesheet" href="static/formulario.css">
</head>
<body>
<h1>Gracias</h1>
<p>En breve se enviara la informacion</p>
<p>Sugerencias hasta el momento</p>
<p>%s</p>
</body>
</html>""" % texto
return html
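# A hedged sketch of the web.py wiring this handler expects (the URL
# mapping is an assumption, not part of this file):
# import web
# urls = ('/', 'Formulario')
# app = web.application(urls, globals())
# if __name__ == '__main__':
#     app.run()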
|
from distutils.core import setup, Command
from distutils.command.build import build
from distutils.command.build_scripts import build_scripts
from distutils.command.clean import clean
from distutils.command.install import install
from distutils.command.install_data import install_data
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts
from distutils.command.sdist import sdist
from distutils.dep_util import newer
from distutils.dir_util import mkpath, remove_tree
from distutils.util import change_root, subst_vars
import codecs
import collections
import glob
import os
import os.path
import re
import subprocess
import sys
x_scripts = {
'bin': [
'bin/ebuild', 'bin/egencache', 'bin/emerge', 'bin/emerge-webrsync',
'bin/emirrordist', 'bin/portageq', 'bin/quickpkg', 'bin/repoman'
],
'sbin': [
'bin/archive-conf', 'bin/dispatch-conf', 'bin/emaint', 'bin/env-update',
'bin/etc-update', 'bin/fixpackages', 'bin/regenworld'
],
}
class x_build(build):
""" Build command with extra build_man call. """
def run(self):
build.run(self)
self.run_command('build_man')
class build_man(Command):
""" Perform substitutions in manpages. """
user_options = [
]
def initialize_options(self):
self.build_base = None
def finalize_options(self):
self.set_undefined_options('build',
('build_base', 'build_base'))
def run(self):
for d, files in self.distribution.data_files:
if not d.startswith('$mandir/'):
continue
for source in files:
target = os.path.join(self.build_base, source)
mkpath(os.path.dirname(target))
if not newer(source, target) and not newer(__file__, target):
continue
print('copying and updating %s -> %s' % (
source, target))
with codecs.open(source, 'r', 'utf8') as f:
data = f.readlines()
data[0] = data[0].replace('VERSION',
self.distribution.get_version())
with codecs.open(target, 'w', 'utf8') as f:
f.writelines(data)
class docbook(Command):
""" Build docs using docbook. """
user_options = [
        ('doc-formats=', None, 'Documentation formats to build (all xmlto formats for docbook are allowed, comma-separated)'),
]
def initialize_options(self):
self.doc_formats = 'xhtml,xhtml-nochunks'
def finalize_options(self):
self.doc_formats = self.doc_formats.replace(',', ' ').split()
def run(self):
if not os.path.isdir('doc/fragment'):
mkpath('doc/fragment')
with open('doc/fragment/date', 'w'):
pass
with open('doc/fragment/version', 'w') as f:
f.write('<releaseinfo>%s</releaseinfo>' % self.distribution.get_version())
for f in self.doc_formats:
print('Building docs in %s format...' % f)
subprocess.check_call(['xmlto', '-o', 'doc',
'-m', 'doc/custom.xsl', f, 'doc/portage.docbook'])
class epydoc(Command):
""" Build API docs using epydoc. """
user_options = [
]
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options('build_py', ('build_lib', 'build_lib'))
def run(self):
self.run_command('build_py')
print('Building API documentation...')
process_env = os.environ.copy()
pythonpath = self.build_lib
try:
pythonpath += ':' + process_env['PYTHONPATH']
except KeyError:
pass
process_env['PYTHONPATH'] = pythonpath
subprocess.check_call(['epydoc', '-o', 'epydoc',
'--name', self.distribution.get_name(),
'--url', self.distribution.get_url(),
'-qq', '--no-frames', '--show-imports',
'--exclude', 'portage.tests',
'_emerge', 'portage', 'repoman'],
env = process_env)
os.remove('epydoc/api-objects.txt')
class install_docbook(install_data):
""" install_data for docbook docs """
user_options = install_data.user_options
def initialize_options(self):
install_data.initialize_options(self)
self.htmldir = None
def finalize_options(self):
self.set_undefined_options('install', ('htmldir', 'htmldir'))
install_data.finalize_options(self)
def run(self):
if not os.path.exists('doc/portage.html'):
self.run_command('docbook')
self.data_files = [
(self.htmldir, glob.glob('doc/*.html')),
]
install_data.run(self)
class install_epydoc(install_data):
""" install_data for epydoc docs """
user_options = install_data.user_options
def initialize_options(self):
install_data.initialize_options(self)
self.htmldir = None
def finalize_options(self):
self.set_undefined_options('install', ('htmldir', 'htmldir'))
install_data.finalize_options(self)
def run(self):
if not os.path.exists('epydoc/index.html'):
self.run_command('epydoc')
self.data_files = [
(os.path.join(self.htmldir, 'api'), glob.glob('epydoc/*')),
]
install_data.run(self)
class x_build_scripts_custom(build_scripts):
def finalize_options(self):
build_scripts.finalize_options(self)
        if hasattr(self, 'dir_name'):
self.build_dir = os.path.join(self.build_dir, self.dir_name)
if self.dir_name in x_scripts:
self.scripts = x_scripts[self.dir_name]
else:
self.scripts = set(self.scripts)
for other_files in x_scripts.values():
self.scripts.difference_update(other_files)
def run(self):
# group scripts by subdirectory
split_scripts = collections.defaultdict(list)
for f in self.scripts:
dir_name = os.path.dirname(f[len('bin/'):])
split_scripts[dir_name].append(f)
base_dir = self.build_dir
base_scripts = self.scripts
for d, files in split_scripts.items():
self.build_dir = os.path.join(base_dir, d)
self.scripts = files
self.copy_scripts()
# restore previous values
self.build_dir = base_dir
self.scripts = base_scripts
class x_build_scripts_bin(x_build_scripts_custom):
dir_name = 'bin'
class x_build_scripts_sbin(x_build_scripts_custom):
dir_name = 'sbin'
class x_build_scripts_portagebin(x_build_scripts_custom):
dir_name = 'portage'
class x_build_scripts(build_scripts):
    def initialize_options(self):
build_scripts.initialize_options(self)
def finalize_options(self):
build_scripts.finalize_options(self)
def run(self):
self.run_command('build_scripts_bin')
self.run_command('build_scripts_portagebin')
self.run_command('build_scripts_sbin')
class x_clean(clean):
""" clean extended for doc & post-test cleaning """
def clean_docs(self):
def get_doc_outfiles():
for dirpath, dirnames, filenames in os.walk('doc'):
for f in filenames:
if f.endswith('.docbook') or f == 'custom.xsl':
pass
else:
yield os.path.join(dirpath, f)
# do not recurse
break
for f in get_doc_outfiles():
print('removing %s' % repr(f))
os.remove(f)
if os.path.isdir('doc/fragment'):
remove_tree('doc/fragment')
if os.path.isdir('epydoc'):
remove_tree('epydoc')
def clean_tests(self):
# do not remove incorrect dirs accidentally
top_dir = os.path.normpath(os.path.join(self.build_lib, '..'))
cprefix = os.path.commonprefix((self.build_base, top_dir))
if cprefix != self.build_base:
return
bin_dir = os.path.join(top_dir, 'bin')
if os.path.exists(bin_dir):
remove_tree(bin_dir)
conf_dir = os.path.join(top_dir, 'cnf')
if os.path.islink(conf_dir):
print('removing %s symlink' % repr(conf_dir))
os.unlink(conf_dir)
pni_file = os.path.join(top_dir, '.portage_not_installed')
if os.path.exists(pni_file):
print('removing %s' % repr(pni_file))
os.unlink(pni_file)
def clean_man(self):
man_dir = os.path.join(self.build_base, 'man')
if os.path.exists(man_dir):
remove_tree(man_dir)
def run(self):
if self.all:
self.clean_tests()
self.clean_docs()
self.clean_man()
clean.run(self)
class x_install(install):
""" install command with extra Portage paths """
user_options = install.user_options + [
# note: $prefix and $exec_prefix are reserved for Python install
('system-prefix=', None, "Prefix for architecture-independent data"),
('system-exec-prefix=', None, "Prefix for architecture-specific data"),
('bindir=', None, "Install directory for main executables"),
('datarootdir=', None, "Data install root directory"),
('docdir=', None, "Documentation install directory"),
('htmldir=', None, "HTML documentation install directory"),
('mandir=', None, "Manpage root install directory"),
('portage-base=', 'b', "Portage install base"),
('portage-bindir=', None, "Install directory for Portage internal-use executables"),
('portage-datadir=', None, 'Install directory for data files'),
('sbindir=', None, "Install directory for superuser-intended executables"),
('sysconfdir=', None, 'System configuration path'),
]
# note: the order is important for proper substitution
paths = [
('system_prefix', '/usr'),
('system_exec_prefix', '$system_prefix'),
('bindir', '$system_exec_prefix/bin'),
('sbindir', '$system_exec_prefix/sbin'),
('sysconfdir', '/etc'),
('datarootdir', '$system_prefix/share'),
('docdir', '$datarootdir/doc/$package-$version'),
('htmldir', '$docdir/html'),
('mandir', '$datarootdir/man'),
('portage_base', '$system_exec_prefix/lib/portage'),
('portage_bindir', '$portage_base/bin'),
('portage_datadir', '$datarootdir/portage'),
# not customized at the moment
('logrotatedir', '$sysconfdir/logrotate.d'),
('portage_confdir', '$portage_datadir/config'),
('portage_setsdir', '$portage_confdir/sets'),
]
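    # For illustration (not part of the original class): with the
    # defaults above, finalize_options expands
    #   '$datarootdir/doc/$package-$version'
    # to '/usr/share/doc/portage-2.2.15', because each entry is
    # substituted against the values already resolved before it.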
def initialize_options(self):
install.initialize_options(self)
for key, default in self.paths:
setattr(self, key, default)
self.subst_paths = {}
def finalize_options(self):
install.finalize_options(self)
# substitute variables
new_paths = {
'package': self.distribution.get_name(),
'version': self.distribution.get_version(),
}
for key, default in self.paths:
new_paths[key] = subst_vars(getattr(self, key), new_paths)
setattr(self, key, new_paths[key])
self.subst_paths = new_paths
class x_install_data(install_data):
""" install_data with customized path support """
user_options = install_data.user_options
def initialize_options(self):
install_data.initialize_options(self)
self.build_base = None
self.paths = None
def finalize_options(self):
install_data.finalize_options(self)
self.set_undefined_options('build',
('build_base', 'build_base'))
self.set_undefined_options('install',
('subst_paths', 'paths'))
def run(self):
self.run_command('build_man')
def process_data_files(df):
for d, files in df:
# substitute man sources
if d.startswith('$mandir/'):
files = [os.path.join(self.build_base, v) for v in files]
# substitute variables in path
d = subst_vars(d, self.paths)
yield (d, files)
old_data_files = self.data_files
self.data_files = process_data_files(self.data_files)
install_data.run(self)
self.data_files = old_data_files
class x_install_lib(install_lib):
""" install_lib command with Portage path substitution """
user_options = install_lib.user_options
def initialize_options(self):
install_lib.initialize_options(self)
self.portage_base = None
self.portage_bindir = None
self.portage_confdir = None
def finalize_options(self):
install_lib.finalize_options(self)
self.set_undefined_options('install',
('portage_base', 'portage_base'),
('portage_bindir', 'portage_bindir'),
('portage_confdir', 'portage_confdir'))
def install(self):
ret = install_lib.install(self)
def rewrite_file(path, val_dict):
path = os.path.join(self.install_dir, path)
print('Rewriting %s' % path)
with codecs.open(path, 'r', 'utf-8') as f:
data = f.read()
for varname, val in val_dict.items():
regexp = r'(?m)^(%s\s*=).*$' % varname
repl = r'\1 %s' % repr(val)
data = re.sub(regexp, repl, data)
with codecs.open(path, 'w', 'utf-8') as f:
f.write(data)
rewrite_file('portage/__init__.py', {
'VERSION': self.distribution.get_version(),
})
rewrite_file('portage/const.py', {
'PORTAGE_BASE_PATH': self.portage_base,
'PORTAGE_BIN_PATH': self.portage_bindir,
'PORTAGE_CONFIG_PATH': self.portage_confdir,
})
return ret
class x_install_scripts_custom(install_scripts):
def initialize_options(self):
install_scripts.initialize_options(self)
self.root = None
def finalize_options(self):
self.set_undefined_options('install',
('root', 'root'),
(self.var_name, 'install_dir'))
install_scripts.finalize_options(self)
self.build_dir = os.path.join(self.build_dir, self.dir_name)
# prepend root
if self.root is not None:
self.install_dir = change_root(self.root, self.install_dir)
class x_install_scripts_bin(x_install_scripts_custom):
dir_name = 'bin'
var_name = 'bindir'
class x_install_scripts_sbin(x_install_scripts_custom):
dir_name = 'sbin'
var_name = 'sbindir'
class x_install_scripts_portagebin(x_install_scripts_custom):
dir_name = 'portage'
var_name = 'portage_bindir'
class x_install_scripts(install_scripts):
    def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.run_command('install_scripts_bin')
self.run_command('install_scripts_portagebin')
self.run_command('install_scripts_sbin')
class x_sdist(sdist):
""" sdist defaulting to .tar.bz2 format """
def finalize_options(self):
if self.formats is None:
self.formats = ['bztar']
sdist.finalize_options(self)
class build_tests(x_build_scripts_custom):
""" Prepare build dir for running tests. """
def initialize_options(self):
x_build_scripts_custom.initialize_options(self)
self.build_base = None
self.build_lib = None
def finalize_options(self):
x_build_scripts_custom.finalize_options(self)
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# since we will be writing to $build_lib/.., it is important
# that we do not leave $build_base
self.top_dir = os.path.normpath(os.path.join(self.build_lib, '..'))
cprefix = os.path.commonprefix((self.build_base, self.top_dir))
if cprefix != self.build_base:
raise SystemError('build_lib must be a subdirectory of build_base')
self.build_dir = os.path.join(self.top_dir, 'bin')
def run(self):
self.run_command('build_py')
# install all scripts $build_lib/../bin
# (we can't do a symlink since we want shebangs corrected)
x_build_scripts_custom.run(self)
# symlink 'cnf' directory
conf_dir = os.path.join(self.top_dir, 'cnf')
if os.path.exists(conf_dir):
if not os.path.islink(conf_dir):
raise SystemError('%s exists and is not a symlink (collision)'
% repr(conf_dir))
os.unlink(conf_dir)
conf_src = os.path.relpath('cnf', self.top_dir)
print('Symlinking %s -> %s' % (conf_dir, conf_src))
os.symlink(conf_src, conf_dir)
# create $build_lib/../.portage_not_installed
# to enable proper paths in tests
with open(os.path.join(self.top_dir, '.portage_not_installed'), 'w') as f:
pass
class test(Command):
""" run tests """
user_options = []
def initialize_options(self):
self.build_lib = None
def finalize_options(self):
self.set_undefined_options('build',
('build_lib', 'build_lib'))
def run(self):
self.run_command('build_tests')
subprocess.check_call([
sys.executable, '-bWd',
os.path.join(self.build_lib, 'portage/tests/runTests.py')
])
def find_packages():
for dirpath, dirnames, filenames in os.walk('pym'):
if '__init__.py' in filenames:
yield os.path.relpath(dirpath, 'pym')
def find_scripts():
for dirpath, dirnames, filenames in os.walk('bin'):
for f in filenames:
if f not in ['deprecated-path']:
yield os.path.join(dirpath, f)
def get_manpages():
linguas = os.environ.get('LINGUAS')
if linguas is not None:
linguas = linguas.split()
for dirpath, dirnames, filenames in os.walk('man'):
groups = collections.defaultdict(list)
for f in filenames:
fn, suffix = f.rsplit('.', 1)
groups[suffix].append(os.path.join(dirpath, f))
topdir = dirpath[len('man/'):]
if not topdir or linguas is None or topdir in linguas:
for g, mans in groups.items():
yield [os.path.join('$mandir', topdir, 'man%s' % g), mans]
setup(
name = 'portage',
version = '2.2.15',
url = 'https://wiki.gentoo.org/wiki/Project:Portage',
author = 'Gentoo Portage Development Team',
author_email = 'dev-portage@gentoo.org',
package_dir = {'': 'pym'},
packages = list(find_packages()),
# something to cheat build & install commands
scripts = list(find_scripts()),
data_files = list(get_manpages()) + [
['$sysconfdir', ['cnf/etc-update.conf', 'cnf/dispatch-conf.conf']],
['$logrotatedir', ['cnf/logrotate.d/elog-save-summary']],
['$portage_confdir', [
'cnf/make.conf.example', 'cnf/make.globals', 'cnf/repos.conf']],
['$portage_setsdir', ['cnf/sets/portage.conf']],
['$docdir', ['NEWS', 'RELEASE-NOTES']],
['$portage_base/bin', ['bin/deprecated-path']],
['$sysconfdir/portage/repo.postsync.d', ['cnf/repo.postsync.d/example']],
],
cmdclass = {
'build': x_build,
'build_man': build_man,
'build_scripts': x_build_scripts,
'build_scripts_bin': x_build_scripts_bin,
'build_scripts_portagebin': x_build_scripts_portagebin,
'build_scripts_sbin': x_build_scripts_sbin,
'build_tests': build_tests,
'clean': x_clean,
'docbook': docbook,
'epydoc': epydoc,
'install': x_install,
'install_data': x_install_data,
'install_docbook': install_docbook,
'install_epydoc': install_epydoc,
'install_lib': x_install_lib,
'install_scripts': x_install_scripts,
'install_scripts_bin': x_install_scripts_bin,
'install_scripts_portagebin': x_install_scripts_portagebin,
'install_scripts_sbin': x_install_scripts_sbin,
'sdist': x_sdist,
'test': test,
},
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Installation/Setup'
]
)
|
from django.test import TransactionTestCase
from mirrors.models import MirrorRsync, Mirror
TEST_IPV6 = "2a0b:4342:1a31:410::"
TEST_IPV4 = "8.8.8.8"
class MirrorRsyncTest(TransactionTestCase):
def setUp(self):
self.mirror = Mirror.objects.create(name='rmirror',
admin_email='foo@bar.com')
def tearDown(self):
self.mirror.delete()
def test_ipv6(self):
mirrorrsync = MirrorRsync.objects.create(ip=TEST_IPV6, mirror=self.mirror)
self.assertEqual(str(mirrorrsync), TEST_IPV6)
mirrorrsync.delete()
def test_ipv4(self):
mirrorrsync = MirrorRsync.objects.create(ip=TEST_IPV4, mirror=self.mirror)
self.assertEqual(str(mirrorrsync), TEST_IPV4)
mirrorrsync.delete()
def test_invalid(self):
with self.assertRaises(ValueError) as e:
MirrorRsync.objects.create(ip="8.8.8.8.8", mirror=self.mirror)
self.assertIn('IPv4 Address with more than 4 bytes', str(e.exception))
|
'''
User-defined widget with functionality to deal with labels in
an interactive way
'''
from PyQt5 import QtGui, QtWidgets, QtCore
class LabelWidget(QtWidgets.QWidget):
'''
Widget to visualise and assign labels
'''
def __init__(self, item, father):
'''
Initialises the widget.
parameters
- item: the table item around which we wrap the functionality
- father: the parent widget
'''
super(LabelWidget, self).__init__()
self.layout = QtWidgets.QHBoxLayout()
self.layout.setAlignment(QtCore.Qt.AlignLeft)
self.setLayout(self.layout)
self.style = 'QLabel { background-color : #AAAAAA; color: black; }'
self.father = father
self.item = item
self.item.setForeground(QtGui.QColor(255, 255, 255))
self.setContentsMargins(0, 0, 0, 0)
self.layout.setContentsMargins(0, 0, 0, 0)
def labels_to_string(self):
'''
Transforms a set of labels into a single string.
returns:
- A string representation of the widget's labels
'''
if self.layout.count() > 0:
labels = self.layout.itemAt(0).widget().text()
for i in range(1, self.layout.count()):
labels = labels + ', ' + self.layout.itemAt(i).widget().text()
else:
labels = ''
return labels
def string_to_labels(self, text):
'''
Transform a string representation into a set of labels. Labels are
supposed to be comma-separated.
parameters:
- text: the string to parse for labels
'''
# Removing the elements in the layout
for i in reversed(range(self.layout.count())):
self.layout.itemAt(i).widget().setParent(None)
# Adding the labels
labels = text.split(',')
        # Remove duplicates (note: set() does not sort or preserve order)
        labels = list(set(labels))
# Adding label widgets
for label in labels:
label_text = str(label).strip()
if label_text != '':
label_widget = QtWidgets.QLabel(label_text)
label_widget.setStyleSheet(self.style)
self.layout.addWidget(label_widget)
self.item.setText(self.labels_to_string())
def get_labels(self):
'''
Getter for the labels contained by the widget
returns:
- a list which contains the labels in the widget
'''
labels = []
if self.layout.count() > 0:
for i in range(0, self.layout.count()):
labels.append(self.layout.itemAt(i).widget().text())
return labels
def mousePressEvent(self, event):
'''
Signal slot for the event of clicking on the widget. The event parameter is unused.
'''
# pylint: disable=invalid-name
# pylint: disable=unused-argument
labels = self.labels_to_string()
for label in self.get_labels():
self.father.models['label_list_model'].remove(label)
text, ok = QtWidgets.QInputDialog.getText(
self,
'Labels',
'Enter labels (comma-separated)',
QtWidgets.QLineEdit.Normal,
labels)
if ok:
self.string_to_labels(text)
self.father.changed = text != labels
for label in self.get_labels():
self.father.models['label_list_model'].add(label)
        # Process pending events so the labels are painted before the
        # table's columns are resized.
        QtWidgets.qApp.processEvents()
|
"""
==============================
``umansysprop.results`` Module
==============================
This module defines the classes used to encapsulate results returned by the
UManSysProp server. Each tool method on the client will return an instance of
the :class:`Result` class which in turn contains one or more :class:`Table`
instances.
Result
======
.. autoclass:: Result
Table
=====
.. autoclass:: Table
"""
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import sys
import json
import itertools
class Result(list):
"""
Represents a list of named :class:`Table` objects.
The result of a method is represented as a sequence of tables. This class
contains a list of :class:`Table` objects each of which may be retrieved by
name, or by index in the list (tables with identical names are not ignored,
but only the first table may be retrieved by name).
.. note::
This class has an extended string representation intended for easy
command line debugging. Simply print an instance of the class to view a
dump of all the tables contained within it.
"""
def __init__(self, *tables):
super(Result, self).__init__(tables)
@classmethod
def from_json(cls, obj):
"""
This class constructor accepts a parsed JSON object (created by
:func:`umansysprop.renderers.render_json`, or with the same structure
produced by that function) and constructs the :class:`Result` from this
structure.
"""
def to_tuple(v):
if isinstance(v, list):
return tuple(v)
else:
return v
tables = []
for table_dict in obj:
name = table_dict['name']
title = table_dict['title']
rows_title = to_tuple(table_dict['rows_title'])
rows_unit = to_tuple(table_dict['rows_unit'])
cols_title = to_tuple(table_dict['cols_title'])
cols_unit = to_tuple(table_dict['cols_unit'])
rows = []
cols = []
data = {}
for datum in table_dict['data']:
key = datum['key']
value = datum['value']
row_key, col_key = (to_tuple(v) for v in key)
if row_key not in rows:
rows.append(row_key)
if col_key not in cols:
cols.append(col_key)
data[(row_key, col_key)] = value
tables.append(Table(
name, rows, cols, data=data, title=title,
rows_title=rows_title, rows_unit=rows_unit,
cols_title=cols_title, cols_unit=cols_unit))
return cls(*tables)
def __getattr__(self, name):
for table in self:
if table.name == name:
return table
        raise AttributeError(name)
def __str__(self):
return '\n'.join(
line
for table in self
for line in (
table.name,
'=' * len(table.name),
'',
str(table),
'',
)
)
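# For illustration (not part of the original module): a Result can be
# rebuilt from the parsed JSON produced by render_json, e.g.
#   result = Result.from_json(json.loads(payload))
#   print(result[0].name)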
class Table(object):
"""
Represents a single table in a :class:`Result`.
A tool is expected to return a sequence of :class:`Table` objects in a
:class:`Result` object. Each table has a :attr:`name` (which can be used to
access it in the :class:`Result` object), an ordered list of keys for
:attr:`rows` and :attr:`cols`, and a function which is used to derive the
data for each cell. The function accepts two arguments, the row and column
key in that order, and is expected to return a scalar value. The reason for
constructing a table in this manner (lazy evaluation) is that it enables
renderers to query the table structure and layout without necessarily
calculating anything. Calculated data is cached on the assumption that such
calculations are expensive.
The row and column keys can be any immutable value (immutability is
required as they will form keys in a dict at evaluation time). Keys which
are tuples will be treated specially as renderers. For example, if each
row key is a 2-tuple, then each row in the resulting table will have two
row headers in two separate columns at the left of the table. This can
aid in representing data with more than 2 dimensions in a table.
Consider a result set keyed by values A, B, and C. The table can be
constructed with a series of 2-tuple row keys (A, B), while the column can
be scalar C values. The resulting table will be rendered as follows:
+----+----+------+------+------+
| | | C1 | C2 | C3 |
+====+====+======+======+======+
| A1 | B1 | data | data | data |
+ +----+------+------+------+
| | B2 | data | data | data |
+----+----+------+------+------+
| A2 | B1 | data | data | data |
+ +----+------+------+------+
| | B2 | data | data | data |
+----+----+------+------+------+
Optional attributes also exist for :attr:`title`, :attr:`rows_title`,
:attr:`cols_title`, :attr:`rows_unit`, and :attr:`cols_unit` (these all
default to an empty string if omitted). In the case that tuples are used
for row or column keys, the corresponding title and unit values must be
tuples as well.
.. note::
Like :class:`Result`, this class has an extended string representation
intended for easy command line debugging. Printing an instance of
this class will produce a human readable string representation of the
table's row and column keys along with the calculated data.
.. autoattribute:: as_ndarray
.. autoattribute:: as_dataframe
.. autoattribute:: col_dims
.. autoattribute:: col_titles
.. autoattribute:: cols
.. autoattribute:: cols_iter
.. attribute:: cols_title
A string or tuple of strings giving the title of each column dimension.
Note that if :attr:`col_dims` is 1, this may be either a string or
a 1-tuple containing a string. The associated :attr:`col_titles`
attribute may be easier to work with.
.. attribute:: cols_unit
A string or tuple of strings giving the units of each column dimension.
Note that if :attr:`col_dims` is 1, this may be either a string or
a 1-tuple containing a string. The associated :attr:`col_units`
attribute may be easier to work with.
.. autoattribute:: data
.. autoattribute:: data_iter
.. attribute:: name
The name of the table. This is intended for scripting usage and as such
will only ever contain a string beginning with an alphabetic character
followed by zero or more alphanumeric characters or underscores.
.. autoattribute:: row_dims
.. autoattribute:: row_titles
.. autoattribute:: rows
.. autoattribute:: rows_iter
.. attribute:: rows_title
A string or tuple of strings giving the title of each row dimension.
Note that if :attr:`row_dims` is 1, this may be either a string or
a 1-tuple containing a string. The associated :attr:`row_titles`
attribute may be easier to work with.
.. attribute:: rows_unit
A string or tuple of strings giving the units of each row dimension.
Note that if :attr:`row_dims` is 1, this may be either a string or
a 1-tuple containing a string. The associated :attr:`row_units`
attribute may be easier to work with.
.. attribute:: title
The human readable title of the table, typically rendered in the web
interface as the table's caption.
"""
def __init__(
self, name, rows, cols, func=None, data=None, title='',
rows_title=None, cols_title=None, rows_unit=None, cols_unit=None):
if func is None and data is None:
raise ValueError('Either func or data must be specified')
self._rows = tuple(rows)
self._cols = tuple(cols)
if not self._rows:
raise ValueError('Table must have at least one row key')
if not self._cols:
raise ValueError('Table must have at least one column key')
self._row_dims = len(self.rows[0]) if isinstance(self.rows[0], tuple) else 1
self._col_dims = len(self.cols[0]) if isinstance(self.cols[0], tuple) else 1
self.rows_title = self._keys_default(rows_title, self.row_dims)
self.rows_unit = self._keys_default(rows_unit, self.row_dims)
self.cols_title = self._keys_default(cols_title, self.col_dims)
self.cols_unit = self._keys_default(cols_unit, self.col_dims)
self._row_spans = self._calculate_spans(tuple(self.rows_iter), self.row_dims)
self._col_spans = self._calculate_spans(tuple(self.cols_iter), self.col_dims)
self._func = func
self._data = data
self.name = name
self.title = title
def _keys_default(self, value, dims):
if value is None:
if dims == 1:
value = ''
else:
value = ('',) * dims
if dims > 1:
if not isinstance(value, tuple):
raise ValueError('%r is not a tuple' % value)
if len(value) != dims:
                raise ValueError('%r does not contain %d elements' % (value, dims))
return value
def _calculate_spans(self, keys, dims):
if not keys:
raise ValueError('keys cannot be empty')
if dims > 1:
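            # For each dimension, scan the keys in order: the first key
            # of a run of equal values gets the run length as its span,
            # while the remaining keys in the run get 0 so renderers can
            # skip their header cells entirely.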
spans = []
for dim in range(dims):
dim_spans = []
                span = 0
for i, key in enumerate(keys):
if span == 0:
for j, comparison in enumerate(keys[i:]):
if comparison[dim] == key[dim]:
span += 1
else:
break
dim_spans.append(span)
else:
dim_spans.append(0)
span -= 1
spans.append(dim_spans)
return {key: tuple(spans[d][i] for d in range(dims)) for i, key in enumerate(keys)}
else:
return {key: (1,) for key in keys}
@property
def rows(self):
"""
An ordered sequence of keys for the rows of the table. If
:attr:`row_dims` is greater than one, then this is a sequence of
tuples. These values, combined with :attr:`cols` can be used to
index :attr:`data` in display order like so::
for row in table.rows:
for col in table.cols:
print(table.data[(row, col)])
"""
return self._rows
@property
def row_dims(self):
"""
The number of dimensions within the row keys. If this is greater than
one, then :attr:`rows` is a sequence of tuples.
"""
return self._row_dims
@property
def rows_iter(self):
"""
Returns an iterator over :attr:`rows` where each key is returned as a
tuple, regardless. This property is intended to make renderers simpler.
"""
for row in self._rows:
if self.row_dims > 1:
yield row
else:
yield (row,)
@property
def row_titles(self):
"""
Returns :attr:`rows_title` as a tuple, regardless. This property is
intended to make renderers simpler.
"""
if self.row_dims > 1:
return self.rows_title
else:
return (self.rows_title,)
@property
def row_units(self):
"""
Returns :attr:`rows_unit` as a tuple, regardless. This property is
intended to make renderers simpler.
"""
if self.row_dims > 1:
return self.rows_unit
else:
return (self.rows_unit,)
@property
def row_spans(self):
"""
A mapping of row keys (as tuples, as from :attr:`rows_iter`) to a tuple
of row spans. For example, consider the following sequence of row
keys::
(1, 1), (1, 2), (1, 3), (2, 1), (2, 2)
This would generate the following row spans mapping::
{
(1, 1): (3, 1),
(1, 2): (0, 1),
(1, 3): (0, 1),
(2, 1): (2, 1),
(2, 2): (0, 1),
}
Indicating that the first element of the first key should span three
rows, and that the subsequent first elements within the spanned rows
should not be rendered at all. This property is intended to make
renderers that target human-readable formats simpler.
"""
return self._row_spans
@property
def cols(self):
"""
An ordered sequence of keys for the columns of the table. If
        :attr:`col_dims` is greater than one, then this is a sequence of
tuples. These values, combined with :attr:`rows` can be used to index
:attr:`data` in display order like so::
for row in table.rows:
for col in table.cols:
print(table.data[(row, col)])
"""
return self._cols
@property
def col_dims(self):
"""
The number of dimensions within the column keys. If this is greater
than one, then :attr:`cols` is a sequence of tuples.
"""
return self._col_dims
@property
def cols_iter(self):
"""
Returns an iterator over :attr:`cols` where each key is returned as
a tuple, regardless.
"""
for col in self._cols:
if self.col_dims > 1:
yield col
else:
yield (col,)
@property
def col_titles(self):
"""
Returns :attr:`cols_title` as a tuple, regardless. This property is
intended to make renderers simpler.
"""
if self.col_dims > 1:
return self.cols_title
else:
return (self.cols_title,)
@property
def col_units(self):
"""
Returns :attr:`cols_unit` as a tuple, regardless. This property is
intended to make renderers simpler.
"""
if self.col_dims > 1:
return self.cols_unit
else:
return (self.cols_unit,)
@property
def col_spans(self):
"""
A mapping of col keys (as tuples, as from :attr:`cols_iter`) to a tuple
of col spans. See :attr:`row_spans` for an example of the mapping
returned.
"""
return self._col_spans
@property
def data(self):
"""
The data contained within the table. This is presented as a dict
keyed by `(row_key, col_key)` tuples. To retrieve data in the same
order as it should be presented, iterate over the :attr:`rows` and
:attr:`cols` attributes.
"""
if self._data is None:
self._data = {
(row, col): self._func(row, col)
for row in self.rows
for col in self.cols
}
self._func = None
return self._data
@property
def data_iter(self):
"""
Returns an iterator over :attr:`data` where each key, value combination
is returned as a tuple of (row_key, col_key, value), and each row and
column key is returned as a tuple, regardless of the number of row and
column dimensions. Furthermore, items are returned in declared row then
column order. This property is intended to make renderers simpler.
"""
for row_tuple, row_key in zip(self.rows_iter, self.rows):
for col_tuple, col_key in zip(self.cols_iter, self.cols):
yield row_tuple, col_tuple, self.data[(row_key, col_key)]
@property
def as_ndarray(self):
"""
Returns the content of the table as a `numpy`_ :class:`ndarray` with
the shape ``(rows, cols)``. Rows and columns will be in the order given
by the :attr:`rows` and :attr:`cols` attributes. Please note that row
and column keys are *not* included in the resulting array (as ndarrays
purposely do not support heterogeneous data types).
.. warning::
Accessing this property will implicitly import the numpy module.
This is not done during module import to avoid creating an
explicit dependency on numpy.
.. _numpy: http://www.numpy.org/
"""
import numpy as np
return np.asarray(
[
[self.data[(row, col)] for col in self.cols]
for row in self.rows
            ], dtype=float)
@property
def as_dataframe(self):
"""
Returns the content of the table as a `pandas`_ :class:`DataFrame`. The
:attr:`rows` and :attr:`cols` attributes will be included as the index
and columns of the resulting DataFrame.
.. warning::
Accessing this property will implicitly import the pandas module.
This is not done during module import to avoid creating an
explicit dependency on pandas.
.. _pandas: http://pandas.pydata.org/
"""
import pandas as pd
if self.row_dims == 1:
rows_index = pd.Index(self.rows, name=self.rows_title)
else:
rows_index = pd.MultiIndex.from_tuples(self.rows, names=self.rows_title)
if self.col_dims == 1:
cols_index = pd.Index(self.cols, name=self.cols_title)
else:
cols_index = pd.MultiIndex.from_tuples(self.cols, names=self.cols_title)
return pd.DataFrame(self.as_ndarray, index=rows_index, columns=cols_index)
def __repr__(self):
return '<Table name="%s">' % self.name
def __str__(self):
        # Calculate maximum column lengths. First, the row headers
if self.row_dims > 1:
max_col_lens = [
max(len(str(row[i]).strip()) for row in self.rows)
for i in range(self.row_dims)
]
else:
max_col_lens = [
max(len(str(row).strip()) for row in self.rows)
]
# then column headers and data...
if self.col_dims > 1:
max_col_lens.extend([
max(
max(len(str(self.data[(row, col)]).strip()) for row in self.rows), # lengths of all col values
*(len(str(header).strip()) for header in col) # length of col header(s)
)
for col in self.cols
])
else:
max_col_lens.extend([
max(
max(len(str(self.data[(row, col)]).strip()) for row in self.rows),
len(str(col).strip()),
)
for col in self.cols
])
result = ''
# Print the column headers
if self.col_dims > 1:
for i in range(self.col_dims):
result += ' | '.join(
'%*s' % (max_col_len, str(col[i]))
for max_col_len, col in zip(
max_col_lens, (('',) * self.col_dims,) * self.row_dims + self.cols)
)
result += '\n'
else:
result += ' | '.join(
'%*s' % (max_col_len, str(col))
for max_col_len, col in zip(
max_col_lens, ('',) * self.row_dims + self.cols)
)
result += '\n'
# Print a separator row
result += '-+-'.join(
'-' * max_col_len
for max_col_len in max_col_lens
)
result += '\n'
# Print the data rows
for row in self.rows:
if self.row_dims > 1:
result += ' | '.join(
'%*s' % (max_col_len, value)
for max_col_len, value in zip(
max_col_lens,
[
str(head).strip()
for head in row
] +
[
str(self.data[(row, col)]).strip()
for col in self.cols
]
)
)
else:
result += ' | '.join(
'%*s' % (max_col_len, value)
for max_col_len, value in zip(
max_col_lens,
[str(row).strip()] +
[
str(self.data[(row, col)]).strip()
for col in self.cols
]
)
)
result += '\n'
if sys.version_info.major < 3:
return result.encode('utf-8')
return result
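if __name__ == '__main__':
    # A minimal usage sketch (not part of the original module), assuming the
    # constructor accepts the keyword arguments implied by the attribute
    # assignments in __init__ (rows, cols, func, rows_title, cols_title,
    # name, title).
    table = Table(
        rows=(1, 2, 3), cols=(1, 2, 3),
        func=lambda row, col: row * col,
        rows_title='x', cols_title='y',
        name='times', title='Multiplication table')
    print(table)                 # grid rendered by __str__
    print(table.data[(2, 3)])    # 6, computed lazily from func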
|
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = [
"Greg Caporaso",
"Justin Kuczynski",
"Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from qiime.util import make_option
from os import makedirs
from qiime.util import (load_qiime_config,
parse_command_line_parameters,
get_options_lookup)
from qiime.parse import parse_qiime_parameters
from qiime.workflow.util import (print_commands,
call_commands_serially,
print_to_stdout,
no_status_updates,
validate_and_set_jobs_to_start)
from qiime.workflow.downstream import run_jackknifed_beta_diversity
script_info = {}
script_info['brief_description'] = """A workflow script for performing jackknifed\
UPGMA clustering and build jackknifed 2d and 3D PCoA plots."""
script_info['script_description'] = """To directly measure the robustness of\
individual UPGMA clusters and clusters in PCoA plots, one can\
perform jackknifing (repeatedly resampling a subset of the available data\
from each sample)."""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""", """These steps are performed\
by the following command: Compute beta diversity distance matrix from otu\
table (and tree, if applicable); build rarefied OTU tables by evenly sampling\
to the specified depth (-e); build UPGMA tree from full distance matrix;\
 compute distance matrices for rarefied OTU tables; build UPGMA trees from\
rarefied OTU table distance matrices; build a consensus tree from the rarefied\
UPGMA trees; compare rarefied OTU table distance matrix UPGMA trees to either\
(full or consensus) tree for jackknife support of tree nodes; perform\
principal coordinates analysis on distance matrices generated from rarefied\
OTU tables; generate 2D and 3D PCoA plots with jackknifed support.
""", """%prog -i otu_table.biom -o bdiv_jk100 -e 100 -m Fasting_Map.txt\
-t rep_set.tre"""))
script_info['script_usage_output_to_remove'] = ['bdiv_jk100']
script_info['output_description'] = """This scripts results in several distance\
matrices (from beta_diversity.py), several rarified OTU tables\
(from multiple_rarefactions_even_depth.py), several UPGMA trees (from upgma_cluster.py),\
a supporting file and newick tree with support values (from tree_compare.py),\
and 2D and 3D PCoA plots."""
qiime_config = load_qiime_config()
options_lookup = get_options_lookup()
script_info['required_options'] = [
make_option('-i', '--otu_table_fp', type='existing_filepath',
help='the input OTU table in biom format [REQUIRED]'),
make_option('-o', '--output_dir', type='new_dirpath',
help='the output directory [REQUIRED]'),
make_option('-e', '--seqs_per_sample', type='int',
help='number of sequences to include in each jackknifed subset' +
' [REQUIRED]'),
make_option('-m', '--mapping_fp', type='existing_filepath',
help='path to the mapping file [REQUIRED]'),
]
script_info['optional_options'] = [
make_option('-t', '--tree_fp', type='existing_filepath',
help='path to the tree file [default: %default; ' +
'REQUIRED for phylogenetic measures]'),
make_option('-p', '--parameter_fp', type='existing_filepath',
help='path to the parameter file, which specifies changes' +
' to the default behavior. ' +
'See http://www.qiime.org/documentation/file_formats.html#qiime-parameters .' +
' [if omitted, default values will be used]'),
make_option('--master_tree', default="consensus",
type='choice', choices=['consensus', 'full'],
help='method for computing master trees in jackknife analysis.' +
' "consensus": consensus of trees from jackknifed otu tables. ' +
' "full": tree generated from input (unsubsambled) otu table. ' +
' [default: %default]'),
make_option('-f', '--force', action='store_true',
dest='force', help='Force overwrite of existing output directory' +
' (note: existing files in output_dir will not be removed)' +
' [default: %default]'),
make_option('-w', '--print_only', action='store_true',
dest='print_only', help='Print the commands but don\'t call them -- ' +
'useful for debugging [default: %default]', default=False),
make_option('-a', '--parallel', action='store_true',
dest='parallel', default=False,
help='Run in parallel where available [default: %default]'),
options_lookup['jobs_to_start_workflow']
]
script_info['version'] = __version__
def main():
option_parser, opts, args = \
parse_command_line_parameters(**script_info)
verbose = opts.verbose
otu_table_fp = opts.otu_table_fp
output_dir = opts.output_dir
tree_fp = opts.tree_fp
seqs_per_sample = opts.seqs_per_sample
print_only = opts.print_only
master_tree = opts.master_tree
parallel = opts.parallel
# No longer checking that jobs_to_start > 2, but
# commenting as we may change our minds about this.
#if parallel: raise_error_on_parallel_unavailable()
if opts.parameter_fp:
try:
parameter_f = open(opts.parameter_fp, 'U')
except IOError:
raise IOError("Can't open parameters file (%s). Does it exist? Do you have read access?"
% opts.parameter_fp)
params = parse_qiime_parameters(parameter_f)
parameter_f.close()
else:
params = parse_qiime_parameters([])
# empty list returns empty defaultdict for now
jobs_to_start = opts.jobs_to_start
default_jobs_to_start = qiime_config['jobs_to_start']
validate_and_set_jobs_to_start(params,
jobs_to_start,
default_jobs_to_start,
parallel,
option_parser)
try:
makedirs(output_dir)
except OSError:
if opts.force:
pass
else:
# Since the analysis can take quite a while, I put this check
# in to help users avoid overwriting previous output.
option_parser.error("Output directory already exists. Please choose"
" a different directory, or force overwrite with -f.")
if print_only:
command_handler = print_commands
else:
command_handler = call_commands_serially
if verbose:
status_update_callback = print_to_stdout
else:
status_update_callback = no_status_updates
run_jackknifed_beta_diversity(otu_table_fp=otu_table_fp,
tree_fp=tree_fp,
seqs_per_sample=seqs_per_sample,
output_dir=output_dir,
command_handler=command_handler,
params=params,
qiime_config=qiime_config,
mapping_fp=opts.mapping_fp,
parallel=parallel,
status_update_callback=status_update_callback,
master_tree=master_tree)
if __name__ == "__main__":
main()
|
"""
Geography for one person and all their descendants
"""
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
import operator
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
import logging
_LOG = logging.getLogger("GeoGraphy.geomoves")
from gramps.gen.lib import EventRoleType, EventType
from gramps.gen.config import config
from gramps.gen.datehandler import displayer
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.place import displayer as _pd
from gramps.gen.utils.place import conv_lat_lon
from gramps.gui.views.bookmarks import PersonBookmarks
from gramps.plugins.lib.maps import constants
from gramps.plugins.lib.maps.geography import GeoGraphyView
_UI_DEF = [
'''
<placeholder id="CommonGo">
<section>
<item>
<attribute name="action">win.Back</attribute>
<attribute name="label" translatable="yes">_Back</attribute>
</item>
<item>
<attribute name="action">win.Forward</attribute>
<attribute name="label" translatable="yes">_Forward</attribute>
</item>
</section>
<section>
<item>
<attribute name="action">win.HomePerson</attribute>
<attribute name="label" translatable="yes">_Home</attribute>
</item>
</section>
</placeholder>
''',
'''
<section id='CommonEdit' groups='RW'>
<item>
<attribute name="action">win.PrintView</attribute>
<attribute name="label" translatable="yes">_Print...</attribute>
</item>
</section>
''',
'''
<section id="AddEditBook">
<item>
<attribute name="action">win.AddBook</attribute>
<attribute name="label" translatable="yes">_Add Bookmark</attribute>
</item>
<item>
<attribute name="action">win.EditBook</attribute>
<attribute name="label" translatable="no">%s...</attribute>
</item>
</section>
''' % _('Organize Bookmarks'), # Following are the Toolbar items
'''
<placeholder id='CommonNavigation'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-previous</property>
<property name="action-name">win.Back</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the previous object in the history</property>
<property name="label" translatable="yes">_Back</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-next</property>
<property name="action-name">win.Forward</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the next object in the history</property>
<property name="label" translatable="yes">_Forward</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">go-home</property>
<property name="action-name">win.HomePerson</property>
<property name="tooltip_text" translatable="yes">'''
'''Go to the default person</property>
<property name="label" translatable="yes">_Home</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''',
'''
<placeholder id='BarCommonEdit'>
<child groups='RO'>
<object class="GtkToolButton">
<property name="icon-name">document-print</property>
<property name="action-name">win.PrintView</property>
<property name="tooltip_text" translatable="yes">'''
'''Print or save the Map</property>
<property name="label" translatable="yes">_Print...</property>
<property name="use-underline">True</property>
</object>
<packing>
<property name="homogeneous">False</property>
</packing>
</child>
</placeholder>
''']
class GeoMoves(GeoGraphyView):
"""
The view used to render all places visited by one person and all
his descendants.
"""
CONFIGSETTINGS = (
('geography.path', constants.GEOGRAPHY_PATH),
('geography.zoom', 10),
('geography.zoom_when_center', 12),
('geography.show_cross', True),
('geography.lock', True),
('geography.center-lat', 0.0),
('geography.center-lon', 0.0),
('geography.use-keypad', True),
('geography.map_service', constants.OPENSTREETMAP),
('geography.max_places', 5000),
        # specific to geomoves:
('geography.color_base', 'orange'),
('geography.maximum_generations', 10),
('geography.generation_interval', 500),
)
def __init__(self, pdata, dbstate, uistate, nav_group=0):
GeoGraphyView.__init__(self, _("Descendance of the active person."),
pdata, dbstate, uistate,
PersonBookmarks,
nav_group)
self.dbstate = dbstate
self.uistate = uistate
self.place_list = []
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
self.started = False
self.minyear = 9999
self.maxyear = 0
self.nbplaces = 0
self.nbmarkers = 0
self.sort = []
self.tracks = []
self.additional_uis.append(self.additional_ui())
self.skip_list = []
self.track = []
self.place_list_active = []
self.place_list_ref = []
self.cal = config.get('preferences.calendar-format-report')
self.markers_by_level = dict()
self.count = dict()
self.no_show_places_in_status_bar = False
self.person_list = []
def get_title(self):
"""
Used to set the titlebar in the configuration window.
"""
return _('GeoMoves')
def get_stock(self):
"""
Returns the name of the stock icon to use for the display.
This assumes that this icon has already been registered
as a stock icon.
"""
return 'geo-show-family'
def get_viewtype_stock(self):
"""Type of view in category
"""
return 'geo-show-family'
def additional_ui(self):
"""
Specifies the UIManager XML code that defines the menus and buttons
associated with the interface.
"""
return _UI_DEF
def navigation_type(self):
"""
Indicates the navigation type. Navigation type can be the string
name of any of the primary objects.
"""
return 'Person'
def goto_handle(self, handle=None):
"""
Rebuild the tree with the given family handle as reference.
"""
if not self.started:
self.started = True
self.place_list_active = []
self.place_list_ref = []
self.sort = []
self.remove_all_gps()
self.remove_all_markers()
self.lifeway_layer.clear_ways()
self.date_layer.clear_dates()
active = self.get_active()
if active:
person = self.dbstate.db.get_person_from_handle(active)
self._createmap(person)
self.uistate.modify_statusbar(self.dbstate)
def build_tree(self):
"""
This is called by the parent class when the view becomes visible. Since
all handling of visibility is now in rebuild_trees, see that for more
information.
"""
self.place_list_active = []
self.place_list_ref = []
self.sort = []
self.places_found = []
self.place_without_coordinates = []
self.remove_all_gps()
self.remove_all_markers()
self.lifeway_layer.clear_ways()
self.date_layer.clear_dates()
self.message_layer.clear_messages()
def draw(self, menu, marks, color):
"""
Create all displacements for one person's events.
"""
points = []
mark = None
date = " "
for mark in marks:
startlat = float(mark[3])
startlon = float(mark[4])
not_stored = True
for idx in range(0, len(points)):
if points[idx][0] == startlat and points[idx][1] == startlon:
not_stored = False
if not_stored:
points.append((startlat, startlon))
if mark[6] is not None:
date = mark[6]
if date != " ":
self.date_layer.add_date(date[0:4])
self.lifeway_layer.add_way(points, color)
return False
def _createmap_for_one_person(self, person, color):
"""
Create all markers for each people's event in the database which has
a lat/lon.
"""
self.place_list = []
dbstate = self.dbstate
if person is not None:
# For each event, if we have a place, set a marker.
for event_ref in person.get_event_ref_list():
if not event_ref:
continue
event = dbstate.db.get_event_from_handle(event_ref.ref)
role = event_ref.get_role()
try:
date = event.get_date_object().to_calendar(self.cal)
                except Exception:
continue
eyear = str("%04d" % date.get_year()) + \
str("%02d" % date.get_month()) + \
str("%02d" % date.get_day())
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(latitude,
longitude, "D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(event.get_type())
descr1 = _("%(eventtype)s : %(name)s") % {
'eventtype': evt,
'name': _nd.display(person)}
# place.get_longitude and place.get_latitude return
# one string. We have coordinates when the two values
# contains non null string.
if longitude and latitude:
self._append_to_places_list(descr, evt,
person.gramps_id,
latitude, longitude,
descr1, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
role
)
else:
self._append_to_places_without_coord(
place.gramps_id, descr)
family_list = person.get_family_handle_list()
descr1 = " - "
for family_hdl in family_list:
family = self.dbstate.db.get_family_from_handle(family_hdl)
if family is not None:
fhandle = family_list[0] # first is primary
fam = dbstate.db.get_family_from_handle(fhandle)
mother = father = None
handle = fam.get_father_handle()
if handle:
father = dbstate.db.get_person_from_handle(handle)
if father:
descr1 = "%s - " % _nd.display(father)
handle = fam.get_mother_handle()
if handle:
mother = dbstate.db.get_person_from_handle(handle)
if mother:
descr1 = "%s%s" % (descr1, _nd.display(mother))
for event_ref in family.get_event_ref_list():
if event_ref:
event = dbstate.db.get_event_from_handle(
event_ref.ref)
role = event_ref.get_role()
if event.get_place_handle():
place_handle = event.get_place_handle()
if place_handle:
place = dbstate.db.get_place_from_handle(
place_handle)
if place:
longitude = place.get_longitude()
latitude = place.get_latitude()
latitude, longitude = conv_lat_lon(
latitude, longitude, "D.D8")
descr = _pd.display(dbstate.db, place)
evt = EventType(
event.get_type())
eyear = str(
"%04d" % event.get_date_object().to_calendar(self.cal).get_year()) + \
str("%02d" % event.get_date_object().to_calendar(self.cal).get_month()) + \
str("%02d" % event.get_date_object().to_calendar(self.cal).get_day())
if longitude and latitude:
self._append_to_places_list(
descr,
evt,
person.gramps_id,
latitude, longitude,
descr1, eyear,
event.get_type(),
person.gramps_id,
place.gramps_id,
event.gramps_id,
role
)
else:
self._append_to_places_without_coord(place.gramps_id, descr)
sort1 = sorted(self.place_list, key=operator.itemgetter(1, 6))
self.draw(None, sort1, color)
# merge with the last results
merge_list = self.sort
for the_event in sort1:
if the_event not in merge_list:
merge_list.append(the_event)
self.sort = sorted(merge_list, key=operator.itemgetter(6))
def _add_person_to_list(self, person_id, level):
"""
        Create a list of unique persons.
"""
if [person_id, level] not in self.person_list:
self.person_list.append([person_id, level])
try:
self.count[level] += 1
            except KeyError:
self.count[level] = 1
def _prepare_for_one_family(self, family, level, curlevel):
"""
Create all markers for one family : all event's places with a lat/lon.
"""
dbstate = self.dbstate
person = None
try:
person = dbstate.db.get_person_from_handle(
family.get_father_handle())
        except Exception:
return
family_id = family.gramps_id
if person is None: # family without father ?
handle = family.get_mother_handle()
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is None:
handle = self.uistate.get_active('Person')
if handle:
person = dbstate.db.get_person_from_handle(handle)
if person is not None:
self._add_person_to_list(person.gramps_id, curlevel-1)
family_list = person.get_family_handle_list()
for fhandle in family_list:
fam = dbstate.db.get_family_from_handle(fhandle)
father = mother = None
handle = fam.get_father_handle()
if handle:
father = dbstate.db.get_person_from_handle(handle)
if father:
self._createmap_for_next_level(father, level-1, level)
self._add_person_to_list(father.gramps_id, curlevel-1)
handle = fam.get_mother_handle()
if handle:
mother = dbstate.db.get_person_from_handle(handle)
if mother:
                        self._createmap_for_next_level(mother, level-1, level)
self._add_person_to_list(mother.gramps_id, curlevel-1)
index = 0
child_ref_list = fam.get_child_ref_list()
if child_ref_list:
for child_ref in child_ref_list:
child = dbstate.db.get_person_from_handle(child_ref.ref)
if child:
index += 1
self._createmap_for_next_level(child, level,
curlevel)
self._add_person_to_list(child.gramps_id, curlevel)
def _createmap_for_one_level(self, family, level, curlevel):
"""
if maximum generation is not reached, show next level.
"""
if level < curlevel:
return
self._prepare_for_one_family(family, level, curlevel+1)
def _createmap_for_next_level(self, person, level, curlevel):
"""
if maximum generation is not reached, show next level.
"""
if level < curlevel:
return
try:
if person not in self.markers_by_level[curlevel]:
self.markers_by_level[curlevel].append(person)
        except KeyError:
self.markers_by_level[curlevel] = []
self.markers_by_level[curlevel].append(person)
for family in person.get_family_handle_list():
fam = self.dbstate.db.get_family_from_handle(family)
self._createmap_for_one_level(fam, level, curlevel)
if person not in self.markers_by_level[curlevel]:
self.markers_by_level[curlevel].append(person)
def _createmap(self, person):
"""
Create all markers for each family's person in the database which has
a lat/lon.
"""
dbstate = self.dbstate
self.cal = config.get('preferences.calendar-format-report')
self.place_list = []
self.person_list = []
self.count = dict()
self.place_without_coordinates = []
self.minlat = self.maxlat = self.minlon = self.maxlon = 0.0
latitude = ""
longitude = ""
self.places_found = []
self.nbplaces = 0
self.nbmarkers = 0
self.message_layer.clear_messages()
if person is None:
handle = self.uistate.get_active('Person')
if handle:
person = self.dbstate.db.get_person_from_handle(handle)
if not person:
return
self.message_layer.add_message(
_("All descendance for %s") % _nd.display(person))
color = Gdk.color_parse(self._config.get('geography.color_base'))
GLib.timeout_add(int(self._config.get("geography.generation_interval")),
self.animate_moves, 0, person, color)
def animate_moves(self, index, person, color):
"""
Animate all moves for one generation.
"""
self.markers_by_level = dict()
self._createmap_for_next_level(person, index, 0)
try:
persons = self.markers_by_level[index]
        except KeyError:
return
for people in persons:
family_list = people.get_family_handle_list()
for fhandle in family_list:
family = self.dbstate.db.get_family_from_handle(fhandle)
self._prepare_for_one_family(family, index, index+1)
new_list = []
for plx, level in self.person_list:
plxp = self.dbstate.db.get_person_from_gramps_id(plx)
birth = "0000"
death = "0000"
low_date = "9999"
high_date = "0000"
for event_ref in plxp.get_event_ref_list():
if not event_ref:
continue
event = self.dbstate.db.get_event_from_handle(event_ref.ref)
role = event_ref.get_role()
try:
date = event.get_date_object().to_calendar(self.cal)
fyear = str("%04d" % date.get_year())
if event.get_type() == EventType.BIRTH:
birth = fyear
if event.get_type() == EventType.DEATH:
death = fyear
if fyear < low_date:
low_date = fyear
if fyear > high_date:
high_date = fyear
            except Exception:
pass
if birth == "0000":
birth = low_date
if death == "0000":
death = high_date
new_list.append([level, plxp, birth, death])
pidx = 0
if isinstance(color, str):
color = Gdk.color_parse(color)
for (level, plxp,
birth, death) in sorted(new_list, key=operator.itemgetter(0, 2)):
if index == int(self._config.get("geography.maximum_generations")):
break
if level == index:
pidx += 1
self._createmap_for_one_person(plxp, color)
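            # Shift the colour slightly for each generation so successive
            # levels are drawn in visually distinct shades.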
color.red = (float(color.red - (index)*3000)%65535)
if index % 2:
color.green = float((color.green + (index)*3000)%65535)
else:
color.blue = float((color.blue + (index)*3000)%65535)
self._createmap_for_one_person(person, color)
if index < int(self._config.get("geography.maximum_generations")):
time_to_wait = int(
self._config.get("geography.generation_interval"))
self._create_markers()
# process next generation in a few milliseconds
GLib.timeout_add(int(time_to_wait), self.animate_moves,
index+1, person, color)
else:
self.started = False
return False
def bubble_message(self, event, lat, lon, marks):
"""
Create the menu for the selected marker
"""
self.menu = Gtk.Menu()
menu = self.menu
menu.set_title("descendance")
events = []
message = ""
oldplace = ""
prevmark = None
# Be sure all markers are sorted by place then dates.
for mark in sorted(marks, key=operator.itemgetter(0, 6)):
if mark[10] in events:
continue # avoid duplicate events
else:
events.append(mark[10])
if mark[0] != oldplace:
message = "%s :" % mark[0]
self.add_place_bubble_message(event, lat, lon,
marks, menu,
message, mark)
oldplace = mark[0]
message = ""
evt = self.dbstate.db.get_event_from_gramps_id(mark[10])
# format the date as described in preferences.
date = displayer.display(evt.get_date_object())
if date == "":
date = _("Unknown")
if mark[11] == EventRoleType.PRIMARY:
person = self.dbstate.db.get_person_from_gramps_id(mark[1])
message = "(%s) %s : %s" % (date, mark[2], _nd.display(person))
elif mark[11] == EventRoleType.FAMILY:
(father_name,
mother_name) = self._get_father_and_mother_name(evt)
message = "(%s) %s : %s - %s" % (date, mark[2],
father_name,
mother_name)
else:
descr = evt.get_description()
if descr == "":
descr = _('No description')
message = "(%s) %s => %s" % (date, mark[11], descr)
prevmark = mark
add_item = Gtk.MenuItem(label=message)
add_item.show()
menu.append(add_item)
self.itemoption = Gtk.Menu()
itemoption = self.itemoption
itemoption.set_title(message)
itemoption.show()
add_item.set_submenu(itemoption)
modify = Gtk.MenuItem(label=_("Edit Event"))
modify.show()
modify.connect("activate", self.edit_event,
event, lat, lon, prevmark)
itemoption.append(modify)
center = Gtk.MenuItem(label=_("Center on this place"))
center.show()
center.connect("activate", self.center_here,
event, lat, lon, prevmark)
itemoption.append(center)
person = self.dbstate.db.get_person_from_gramps_id(mark[8])
hdle = person.get_handle()
bookm = Gtk.MenuItem(label=_("Bookmark this person"))
bookm.show()
bookm.connect("activate", self.add_bookmark_from_popup, hdle)
itemoption.append(bookm)
menu.show()
menu.popup(None, None, None,
None, event.button, event.time)
return 1
def add_specific_menu(self, menu, event, lat, lon):
"""
Add specific entry to the navigation menu.
"""
return
def get_default_gramplets(self):
"""
Define the default gramplets for the sidebar and bottombar.
"""
return (("Person Filter",),
())
def specific_options(self, configdialog):
"""
Add specific entry to the preference menu.
Must be done in the associated view.
"""
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
configdialog.add_text(grid,
_('The maximum number of generations.\n'),
1, line_wrap=False)
configdialog.add_slider(grid,
"",
2, 'geography.maximum_generations',
(1, 20))
configdialog.add_text(grid,
_('Time in milliseconds between drawing two generations.\n'),
3, line_wrap=False)
configdialog.add_slider(grid,
"",
4, 'geography.generation_interval',
(500, 3000))
return _('The parameters for moves'), grid
def config_connect(self):
"""
        Used to monitor changes in the .ini file.
"""
self._config.connect('geography.maximum_generations',
self._maximum_generations)
def _maximum_generations(self, client, cnxn_id, entry, data):
"""
        Called when the number of generations changes.
"""
self.goto_handle(handle=None)
|
import pygame, string, time
from gameIo import InputHandler, LoopTimer
from display import TextDisplay
pygame.init()
class ScreenKeyboard:
# initializes the characters for the keyboard
def __init__(self, letters, screen, inputHandler, font, displayFont, fontColor, displayString, gameDisplayer):
self.font = font
self.displayFont = displayFont
self.fontColor = fontColor
self.screen = screen
self.allLetters = letters
self.inputHandler = inputHandler
self.gameDisplayer = gameDisplayer
self.speedScale = .1
self.displayer = TextDisplay(displayString, self.screen, self.displayFont, 0)
#Assumes using a font where all letters are equal width
self.letterSize = self.font.size("A")
self.displayHeight = self.displayFont.size("A")[1]
# initializes the elements of the display
def init(self):
self.setup_grid()
self.setup_display()
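    # builds the grid of letters, wrapping to a new row whenever the current
    # row would exceed the screen width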
def setup_grid(self):
numLettersInRow = int(self.screen.get_width() / self.letterSize[0])
self.letterArray = []
letterNum = 0
while letterNum < len(self.allLetters):
self.letterArray.append([])
for i in range(numLettersInRow):
text = self.font.render(self.allLetters[letterNum].replace(" ", "_"), True, self.fontColor)
self.letterArray[-1].append((self.allLetters[letterNum], text))
letterNum += 1
if letterNum >= len(self.allLetters):
break
# displays the keyboard to the user
def setup_display(self):
self.textSurface = pygame.Surface((self.screen.get_width(), self.letterSize[1] * len(self.letterArray)))
self.textSurfOffset = self.displayer.textYPos + self.letterSize[1]
self.textRects = []
for i in range(len(self.letterArray)):
self.textRects.append([])
for j in range(len(self.letterArray[i])):
rect = pygame.Rect(j * self.letterSize[0], i * self.letterSize[1], self.letterSize[0], self.letterSize[1])
self.textRects[-1].append(rect.move(0, self.textSurfOffset))
self.textSurface.blit(self.letterArray[i][j][1], rect)
self.textSurface.set_alpha(200)
def display(self, enteredText, selectedPos):
self.displayer.draw()
self.screen.fill((0, 0, 0))
numberText = self.displayFont.render("Entered Text: " + enteredText, 1, self.fontColor)
self.screen.blit(numberText, (0, self.textSurfOffset - self.letterSize[1]))
self.screen.fill((0, 255, 255), self.textRects[selectedPos[0]][selectedPos[1]])
self.screen.blit(self.textSurface, (0, self.textSurfOffset))
pygame.display.flip()
def get_name_from_usr(self):
print("keybaord")
enteredText = ""
loopTimer = LoopTimer(60*10)
done = False
while not done:
loopTimer.start()
buttons = [False, False, False]
selectedPos = [0,0]
selectedPosInt = [0,0]
while not buttons[2]:
self.inputHandler.event_handle()
cartJoyPos, buttons = self.inputHandler.get_input()
cartJoyPos = [cartJoyPos[1], cartJoyPos[0]]
                # correct the y coord first so the x can be corrected
for i in [0, 1]:
selectedPos[i] += cartJoyPos[i] * self.speedScale
if selectedPos[i] < 0:
selectedPos[i] = 0
if i == 0:
sizeLimit = len(self.letterArray)-1
else:
sizeLimit = len(self.letterArray[selectedPosInt[0]])-1
if selectedPos[i] > sizeLimit:
selectedPos[i] = sizeLimit
selectedPosInt[i] = int(selectedPos[i]+.5)
#enter selected num
if buttons[0]:
enteredText += self.letterArray[selectedPosInt[0]][selectedPosInt[1]][0]
#backspace
if buttons[1]:
enteredText = enteredText[0:-1]
#exit if timed out
if loopTimer.is_over():
return enteredText
self.display(enteredText, selectedPosInt)
self.gameDisplayer.display_game()
time.sleep(.002)
tConfirm = self.displayFont.render("Your student number is", 1, (255, 0, 0))
tQuestion = self.displayFont.render("Is this correct? Press trigger for yes, 2 for no", 1, (255, 0, 0))
tEntered = self.displayFont.render(enteredText, 1, (255,0,0))
self.screen.fill((0, 0, 0))
self.screen.blit(tConfirm, (0, 0))
self.screen.blit(tEntered, (0, self.displayHeight))
self.screen.blit(tQuestion, (0, self.displayHeight * 2))
self.gameDisplayer.display_game()
loopTimer.start()
#wait for done response
        while True:
self.inputHandler.event_handle()
j, buttons = self.inputHandler.get_input()
if buttons[0]:
done = True
break
if buttons[1]:
done = False
break
#exit if timed out
if loopTimer.is_over():
return enteredText
return enteredText
|
from __future__ import absolute_import, division, unicode_literals
import hashlib
import re
from binascii import unhexlify
from collections import OrderedDict
from .changegroup import (
ParentsTrait,
RawRevChunk,
)
from ..git import NULL_NODE_ID
from ..util import (
check_enabled,
TypedProperty,
)
try:
if check_enabled('no-mercurial'):
raise ImportError('Do not use mercurial')
from mercurial.mdiff import textdiff # noqa: F401
except ImportError:
from ..bdiff import bdiff as textdiff # noqa: F401
class Authorship(object):
__slots__ = ('name', 'email', 'timestamp', 'utcoffset')
WHO_RE = re.compile(b'^(?P<name>.*?) ?(?:\\<(?P<email>.*?)\\>)')
@classmethod
def from_hg_str(cls, s, maybe_git_utcoffset=False):
return cls.from_hg(*s.rsplit(b' ', 2),
maybe_git_utcoffset=maybe_git_utcoffset)
@classmethod
def from_hg(cls, who, timestamp, utcoffset, maybe_git_utcoffset=False):
match = cls.WHO_RE.match(who)
def cleanup(x):
return x.replace(b'<', b'').replace(b'>', b'')
if match:
name = cleanup(match.group('name'))
email = cleanup(match.group('email'))
elif b'@' in who:
name = b''
email = cleanup(who)
else:
name = cleanup(who)
email = b''
# The UTC offset in mercurial author info is in seconds, formatted as
# %d. It also has an opposite sign compared to traditional UTC offsets.
# However, committer info stored in mercurial by hg-git can have
# git-style UTC offsets, in the form [+-]hhmm.
# When what we have is in the form +xxxx or -0yyy, it is obviously the
# latter. When it's -1yyy, it could be either, so we assume that a
# valid UTC offset is always a multiple of 15 minutes. By that
# definition, a number between -1000 and -1800 can't be simultaneously
# a valid UTC offset in seconds and a valid UTC offset in hhmm form.
        # (cf. https://en.wikipedia.org/wiki/List_of_UTC_time_offsets; a few
        # 15-minute-aligned time zones do exist, but they don't produce
        # values that are ambiguous here anyway. Just in case someone
        # creates one some day, assume it won't be finer grained than that.)
if maybe_git_utcoffset and isinstance(utcoffset, bytes):
is_git = False
if utcoffset.startswith((b'+', b'-0')):
is_git = True
elif utcoffset.startswith(b'-1'):
utcoffset = int(utcoffset)
if (utcoffset > -1800 and utcoffset % 900 != 0 and
(utcoffset % 100) % 15 == 0):
is_git = True
if is_git:
return cls.from_git(b'%s <%s>' % (name, email),
timestamp, utcoffset)
result = cls()
result.name = name
result.email = email
result.timestamp = int(timestamp)
result.utcoffset = int(utcoffset)
return result
@classmethod
def from_git_str(cls, s):
return cls.from_git(*s.rsplit(b' ', 2))
@classmethod
def from_git(cls, who, timestamp, utcoffset):
result = cls()
match = cls.WHO_RE.match(who)
# We don't ever expect a git `who` information to not match the regexp,
# as git is very conservative in what it accepts.
assert match
result.name = match.group('name')
result.email = match.group('email')
result.timestamp = int(timestamp)
utcoffset = int(utcoffset)
sign = (utcoffset < 0) - (utcoffset > 0)
utcoffset = abs(utcoffset)
utcoffset = (utcoffset // 100) * 60 + (utcoffset % 100)
result.utcoffset = sign * utcoffset * 60
return result
def to_git(self):
sign = b'+' if self.utcoffset <= 0 else b'-'
utcoffset = abs(self.utcoffset) // 60
utcoffset = b'%c%02d%02d' % (sign, utcoffset // 60, utcoffset % 60)
who = b'%s <%s>' % (self.name, self.email)
return who, (b'%d' % self.timestamp), utcoffset
def to_git_str(self):
return b' '.join(self.to_git())
def to_hg(self):
if self.name and self.email:
who = b'%s <%s>' % (self.name, self.email)
else:
who = self.name or b'<%s>' % self.email
return who, (b'%d' % self.timestamp), (b'%d' % self.utcoffset)
def to_hg_str(self):
return b' '.join(self.to_hg())
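# A hedged round-trip sketch (not from the original source), assuming the
# classmethods above behave as described: a git offset of +0200 becomes a
# mercurial offset of -7200 (opposite sign, expressed in seconds).
#   a = Authorship.from_git_str(b'Jane Doe <jane@example.com> 1500000000 +0200')
#   a.to_hg()  # -> (b'Jane Doe <jane@example.com>', b'1500000000', b'-7200')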
class HgObject(ParentsTrait):
__slots__ = ('node', 'parent1', 'parent2', 'changeset')
def __init__(self, node=NULL_NODE_ID, parent1=NULL_NODE_ID,
parent2=NULL_NODE_ID, changeset=NULL_NODE_ID):
(self.node, self.parent1, self.parent2, self.changeset) = (
node, parent1, parent2, changeset)
@classmethod
def from_chunk(cls, raw_chunk, delta_object=None):
assert isinstance(raw_chunk, RawRevChunk)
assert \
(delta_object is None and raw_chunk.delta_node == NULL_NODE_ID) or\
(isinstance(delta_object, cls) and
raw_chunk.delta_node == delta_object.node)
return cls(raw_chunk.node, raw_chunk.parent1, raw_chunk.parent2,
raw_chunk.changeset)
def to_chunk(self, raw_chunk_type, delta_object=None):
assert delta_object is None or isinstance(delta_object, type(self))
assert issubclass(raw_chunk_type, RawRevChunk)
raw_chunk = raw_chunk_type()
node = self.node if self.node != NULL_NODE_ID else self.sha1
(raw_chunk.node, raw_chunk.parent1, raw_chunk.parent2,
raw_chunk.changeset) = (node, self.parent1, self.parent2,
self.changeset)
if delta_object:
raw_chunk.delta_node = delta_object.node
raw_chunk.patch = self.diff(delta_object)
return raw_chunk
def diff(self, delta_object):
def flatten(s):
return s if isinstance(s, bytes) else bytes(s)
return textdiff(
flatten(delta_object.raw_data) if delta_object else b'',
flatten(self.raw_data))
@property
def sha1(self):
p1 = unhexlify(self.parent1)
p2 = unhexlify(self.parent2)
h = hashlib.sha1(min(p1, p2) + max(p1, p2))
h.update(self.raw_data)
return h.hexdigest().encode('ascii')
@property
def raw_data(self):
return b''.join(self._data_iter())
@raw_data.setter
def raw_data(self, data):
raise NotImplementedError(
'%s.raw_data is not implemented' % self.__class__.__name__)
def _data_iter(self):
raise NotImplementedError(
'%s._data_iter is not implemented' % self.__class__.__name__)
class File(HgObject):
__slots__ = ('content', '__weakref__')
def __init__(self, *args, **kwargs):
super(File, self).__init__(*args, **kwargs)
self.content = b''
self.metadata = {}
@classmethod
def from_chunk(cls, raw_chunk, delta_file=None):
this = super(File, cls).from_chunk(raw_chunk, delta_file)
this.raw_data = raw_chunk.patch.apply(
delta_file.raw_data if delta_file else b'')
return this
@HgObject.raw_data.setter
def raw_data(self, data):
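        # Mercurial filelog data may be framed as b'\x01\n<metadata>\x01\n'
        # followed by the actual content; otherwise it is all content.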
if data.startswith(b'\1\n'):
_, self.metadata, self.content = data.split(b'\1\n', 2)
else:
self.content = data
class Metadata(OrderedDict):
@classmethod
def from_str(cls, s):
return cls(
l.split(b': ', 1)
for l in s.splitlines()
)
@classmethod
def from_dict(cls, d):
if isinstance(d, OrderedDict):
return cls(d)
return cls(sorted(d.items()))
@classmethod
def from_obj(cls, obj):
if isinstance(obj, dict):
return cls.from_dict(obj)
return cls.from_str(obj)
def __str__(self):
raise RuntimeError('Use to_str()')
def to_str(self):
return b''.join(b'%s: %s\n' % i for i in self.items())
metadata = TypedProperty(Metadata)
def _data_iter(self):
metadata = self.metadata.to_str()
if metadata or self.content.startswith(b'\1\n'):
metadata = b'\1\n%s\1\n' % metadata
if metadata:
yield metadata
if self.content:
yield self.content
class Changeset(HgObject):
__slots__ = ('manifest', 'author', 'timestamp', 'utcoffset', 'body',
'__weakref__')
def __init__(self, *args, **kwargs):
super(Changeset, self).__init__(*args, **kwargs)
self.manifest = NULL_NODE_ID
self.author = b''
self.timestamp = b''
self.utcoffset = b''
self.files = []
self.body = b''
@classmethod
def from_chunk(cls, raw_chunk, delta_cs=None):
this = super(Changeset, cls).from_chunk(raw_chunk, delta_cs)
this.raw_data = raw_chunk.patch.apply(
delta_cs.raw_data if delta_cs else b'')
return this
@HgObject.raw_data.setter
def raw_data(self, data):
metadata, self.body = data.split(b'\n\n', 1)
lines = metadata.splitlines()
self.manifest, self.author, date = lines[:3]
date = date.split(b' ', 2)
self.timestamp = date[0]
self.utcoffset = date[1]
if len(date) == 3:
self.extra = date[2]
self.files = lines[3:]
files = TypedProperty(list)
class ExtraData(dict):
@classmethod
def from_str(cls, s):
return cls(i.split(b':', 1) for i in s.split(b'\0') if i)
@classmethod
def from_obj(cls, obj):
if obj is None:
return None
if isinstance(obj, dict):
return cls(obj)
return cls.from_str(obj)
def __str__(self):
raise RuntimeError('Use to_str()')
def to_str(self):
return b'\0'.join(b':'.join(i) for i in sorted(self.items()))
extra = TypedProperty(ExtraData)
def _data_iter(self):
yield self.manifest
yield b'\n'
yield self.author
yield b'\n'
yield self.timestamp
yield b' '
yield self.utcoffset
if self.extra is not None:
yield b' '
yield self.extra.to_str()
if self.files:
yield b'\n'
yield b'\n'.join(sorted(self.files))
yield b'\n\n'
yield self.body
@property
def changeset(self):
return self.node
@changeset.setter
def changeset(self, value):
assert value in (self.node, NULL_NODE_ID)
class ExtraProperty(object):
def __init__(self, name):
self._name = name.encode('ascii')
def __get__(self, obj, type=None):
if obj.extra is None:
return None
return obj.extra.get(self._name)
def __set__(self, obj, value):
if not value:
if obj.extra:
try:
del obj.extra[self._name]
except KeyError:
pass
if not obj.extra:
obj.extra = None
else:
if obj.extra is None:
obj.extra = {}
obj.extra[self._name] = value
branch = ExtraProperty('branch')
committer = ExtraProperty('committer')
close = ExtraProperty('close')
class Manifest(HgObject):
__slots__ = ('__weakref__', '_raw_data')
def __init__(self, *args, **kwargs):
super(Manifest, self).__init__(*args, **kwargs)
self._items = []
self._raw_data = None
class ManifestItem(bytes):
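        # Each item is the raw bytes b'<path>\x00<40-hex sha1><attr>', where
        # attr is b'' for a regular file, b'l' for a symlink or b'x' for an
        # executable; the properties below slice this layout apart.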
@classmethod
def from_info(cls, path, sha1=None, attr=b''):
if isinstance(path, cls):
return path
return cls(b'%s\0%s%s' % (path, sha1, attr))
@property
def path(self):
attr_len = len(self.attr)
assert self[-41 - attr_len:-40 - attr_len] == b'\0'
return self[:-41 - attr_len]
@property
def attr(self):
if self[-1] in b'lx':
return self[-1:]
return b''
@property
def sha1(self):
attr_len = len(self.attr)
if attr_len:
return self[-40 - attr_len:-attr_len]
return self[-40 - attr_len:]
class ManifestList(list):
def __init__(self, *args, **kwargs):
super(Manifest.ManifestList, self).__init__(*args, **kwargs)
self._last = None
def append(self, value):
assert isinstance(value, Manifest.ManifestItem)
assert self._last is None or value > self._last
super(Manifest.ManifestList, self).append(value)
self._last = value
_items = TypedProperty(ManifestList)
@property
def items(self):
if self._raw_data is not None:
self._items[:] = []
for line in self._raw_data.splitlines():
item = self.ManifestItem(line)
self._items.append(item)
self._raw_data = None
return self._items
def add(self, path, sha1=None, attr=b''):
item = Manifest.ManifestItem.from_info(path, sha1, attr)
self.items.append(item)
def __iter__(self):
return iter(self.items)
def _data_iter(self):
for item in self:
yield item
yield b'\n'
@classmethod
def from_chunk(cls, raw_chunk, delta_mn=None):
this = super(Manifest, cls).from_chunk(raw_chunk, delta_mn)
items = iter(delta_mn)
offset = 0
item = b''
for part in raw_chunk.patch:
while offset < part.start:
item = next(items, None)
if item is None:
break
this._items.append(item)
offset += len(item) + 1
assert offset == part.start
for item in part.text_data.tobytes().splitlines():
item = this.ManifestItem(item)
this._items.append(item)
while offset < part.end:
item = next(items, None)
if item is None:
break
offset += len(item) + 1
for item in items:
this._items.append(item)
return this
@property
def raw_data(self):
if self._raw_data is not None:
return self._raw_data
return super(Manifest, self).raw_data
@raw_data.setter
def raw_data(self, data):
self._raw_data = bytes(data)
|
from __future__ import print_function
from unicorn import *
from unicorn.x86_const import *
X86_CODE32 = b"\xeb\x19\x31\xc0\x31\xdb\x31\xd2\x31\xc9\xb0\x04\xb3\x01\x59\xb2\x05\xcd\x80\x31\xc0\xb0\x01\x31\xdb\xcd\x80\xe8\xe2\xff\xff\xff\x68\x65\x6c\x6c\x6f"
X86_CODE32_SELF = b"\xeb\x1c\x5a\x89\xd6\x8b\x02\x66\x3d\xca\x7d\x75\x06\x66\x05\x03\x03\x89\x02\xfe\xc2\x3d\x41\x41\x41\x41\x75\xe9\xff\xe6\xe8\xdf\xff\xff\xff\x31\xd2\x6a\x0b\x58\x99\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xca\x7d\x41\x41\x41\x41\x41\x41\x41\x41"
X86_CODE64 = "\x48\x31\xff\x57\x57\x5e\x5a\x48\xbf\x2f\x2f\x62\x69\x6e\x2f\x73\x68\x48\xc1\xef\x08\x57\x54\x5f\x6a\x3b\x58\x0f\x05"
ADDRESS = 0x1000000
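# Note: Unicorn requires mapped regions (address and size) to be aligned to
# 4KB page boundaries; 0x1000000 and the 2MB mapping below satisfy that.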
def hook_code(uc, address, size, user_data):
print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size))
# read this instruction code from memory
tmp = uc.mem_read(address, size)
print(">>> Instruction code at [0x%x] =" %(address), end="")
for i in tmp:
print(" %02x" %i, end="")
print("")
def hook_block(uc, address, size, user_data):
print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size))
def hook_intr(uc, intno, user_data):
# only handle Linux syscall
if intno != 0x80:
print("got interrupt %x ???" %intno);
uc.emu_stop()
return
eax = uc.reg_read(UC_X86_REG_EAX)
eip = uc.reg_read(UC_X86_REG_EIP)
if eax == 1: # sys_exit
print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax))
uc.emu_stop()
elif eax == 4: # sys_write
# ECX = buffer address
ecx = uc.reg_read(UC_X86_REG_ECX)
# EDX = buffer size
edx = uc.reg_read(UC_X86_REG_EDX)
try:
buf = uc.mem_read(ecx, edx)
print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = " \
%(eip, intno, ecx, edx), end="")
for i in buf:
print("%c" %i, end="")
print("")
except UcError as e:
print(">>> 0x%x: interrupt 0x%x, SYS_WRITE. buffer = 0x%x, size = %u, content = <unknown>\n" \
%(eip, intno, ecx, edx))
else:
print(">>> 0x%x: interrupt 0x%x, EAX = 0x%x" %(eip, intno, eax))
def hook_syscall(mu, user_data):
rax = mu.reg_read(UC_X86_REG_RAX)
print(">>> got SYSCALL with RAX = 0x%x" %(rax))
mu.emu_stop()
def test_i386(mode, code):
print("Emulate x86 code")
try:
# Initialize emulator
mu = Uc(UC_ARCH_X86, mode)
# map 2MB memory for this emulation
mu.mem_map(ADDRESS, 2 * 1024 * 1024)
# write machine code to be emulated to memory
mu.mem_write(ADDRESS, code)
# initialize stack
mu.reg_write(UC_X86_REG_ESP, ADDRESS + 0x200000)
# tracing all basic blocks with customized callback
mu.hook_add(UC_HOOK_BLOCK, hook_block)
# tracing all instructions with customized callback
mu.hook_add(UC_HOOK_CODE, hook_code)
# handle interrupt ourself
mu.hook_add(UC_HOOK_INTR, hook_intr)
# handle SYSCALL
mu.hook_add(UC_HOOK_INSN, hook_syscall, None, 1, 0, UC_X86_INS_SYSCALL)
# emulate machine code in infinite time
mu.emu_start(ADDRESS, ADDRESS + len(code))
# now print out some registers
print(">>> Emulation done")
except UcError as e:
print("ERROR: %s" % e)
if __name__ == '__main__':
test_i386(UC_MODE_32, X86_CODE32_SELF)
print("=" * 20)
test_i386(UC_MODE_32, X86_CODE32)
print("=" * 20)
test_i386(UC_MODE_64, X86_CODE64) # FIXME
|
from yade import pack,export,geom,timing,bodiesHandling
import os,sys,time,numpy
radRAD=[23.658, #5000 elements
40.455, #25000 elements
50.97, #50000 elements
64.218, #100000 elements
80.91] #200000 elements
#109.811] #500000 elements
iterN=[12000, #5000 elements
2500, #25000 elements
1400, #50000 elements
800, #100000 elements
200] #200000 elements
#10] #500000 elements
coefCor=[110,
28,
18,
9,
2]
#0.1]
iterVel=[]
testTime=[]
particlesNumber=[]
numberTests = 3
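# Each packing size is run numberTests times; the per-size velocities are
# averaged (and their spread checked) after the timing loops below.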
tStartAll=time.time()
for z in range(numberTests):
for i in range(len(radRAD)):
rR = radRAD[i]
nbIter=iterN[i]
O.reset()
tc=0.001
en=.003
es=.003
frictionAngle=radians(35)
density=2300
defMat=O.materials.append(ViscElMat(density=density,frictionAngle=frictionAngle,tc=tc,en=en,et=es))
O.dt=.1*tc # time step
rad=0.5 # particle radius
tolerance = 0.0001
SpheresID=[]
SpheresID+=O.bodies.append(pack.regularHexa(pack.inSphere((Vector3(0.0,0.0,0.0)),rad),radius=rad/rR,gap=rad/rR*0.5,material=defMat))
geometryParameters = bodiesHandling.spheresPackDimensions(SpheresID)
print len(SpheresID)
floorId=[]
floorId+=O.bodies.append(geom.facetBox(geometryParameters['center'],geometryParameters['extends']/2.0*1.05,material=defMat)) #Floor
#Calculate the mass of spheres
sphMass = getSpheresVolume()*density
# Create engines
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
[Law2_ScGeom_ViscElPhys_Basic()],
),
NewtonIntegrator(damping=0,gravity=[0,0,-9.81]),
]
print "number of bodies %d"%len(O.bodies)
# Initialize the collider else it is not possible to compare the results with different nbIter
O.run(1,1)
O.timingEnabled=True
timing.reset()
tStart=time.time()
O.run(nbIter)
O.wait()
tEnd=time.time()
print
print 'Elapsed ', tEnd-tStart, ' sec'
print 'Performance ', nbIter/(tEnd-tStart), ' iter/sec'
print 'Extrapolation on 1e5 iters ', (tEnd-tStart)/nbIter*1e5/3600., ' hours'
print "=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*"
timing.stats()
iterVel += [nbIter/(tEnd-tStart)]
testTime += [tEnd-tStart]
particlesNumber += [len(O.bodies)]
tEndAll=time.time()
commonTime = tEndAll-tStartAll
print "Common time ", commonTime, "s"
print
print
scoreIterVel=0.0
for i in range(len(radRAD)):
iterAv=0.0
iterVelNumpy=numpy.empty(3)
for z in range(numberTests):
iterVelNumpy[z]=iterVel[z*len(radRAD)+i]
avgVel = numpy.average(iterVelNumpy)
dispVel = numpy.std(iterVelNumpy)/numpy.average(iterVelNumpy)*100.0
if (dispVel>10):
print "Calculation velocity is unstable, try to close all programs and start performance tests again"
print particlesNumber[i]," spheres, velocity=",avgVel, "+-",dispVel,"%"
scoreIterVel+=avgVel/coefCor[i]*1000.0
print
print
scoreIterVel = int(scoreIterVel)
print "SCORE: " + str(scoreIterVel)
print "Number of threads ", os.environ['OMP_NUM_THREADS']
print"___________________________________________________"
print "CPU info", os.system('cat /proc/cpuinfo')
sys.exit(0)
|
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest
from django.views.generic import View
from pulp.common import tags
from pulp.server.async.tasks import TaskResult
from pulp.server.auth import authorization
from pulp.server.controllers import consumer as consumer_controller
from pulp.server.db import model
from pulp.server.db.model.criteria import Criteria
from pulp.server.exceptions import (InvalidValue, MissingResource, MissingValue,
OperationPostponed, UnsupportedValue)
from pulp.server.managers import factory
from pulp.server.managers.consumer import bind
from pulp.server.managers.consumer import profile
from pulp.server.managers.consumer import query as query_manager
from pulp.server.managers.consumer.applicability import (regenerate_applicability_for_consumers,
retrieve_consumer_applicability)
from pulp.server.managers.schedule.consumer import (UNIT_INSTALL_ACTION, UNIT_UNINSTALL_ACTION,
UNIT_UPDATE_ACTION)
from pulp.server.webservices.views import search
from pulp.server.webservices.views.decorators import auth_required
from pulp.server.webservices.views.serializers import binding as serial_binding
from pulp.server.webservices.views.util import (_ensure_input_encoding,
generate_json_response,
generate_json_response_with_pulp_encoder,
generate_redirect_response,
parse_json_body)
def add_link(consumer):
"""
Add link to the consumer object.
:param consumer: consumer object
:type consumer: dict
:return: link containing the href
:rtype: dict
"""
link = {'_href': reverse('consumer_resource',
kwargs={'consumer_id': consumer['id']})}
consumer.update(link)
return link
def add_link_profile(consumer):
"""
Add link to the consumer profile object.
:param consumer: consumer profile object
:type consumer: dict
:return: link containing the href
:rtype: dict
"""
link = {'_href': reverse('consumer_profile_resource',
kwargs={'consumer_id': consumer['consumer_id'],
'content_type': consumer['content_type']})}
consumer.update(link)
return link
def add_link_schedule(schedule, action_type, consumer_id):
"""
Add link to the schedule object.
:param schedule: schedule object
:type schedule: dict
:param action_type: action type to perform
:type action_type: str
:param consumer_id: id of the consumer
:type consumer_id: str
:return: link containing the href
:rtype: dict
"""
action = action_type.split("_")[-1]
link = {'_href': reverse('schedule_content_%s_resource' % action,
kwargs={'consumer_id': consumer_id,
'schedule_id': schedule['_id']})}
schedule.update(link)
return link
def scheduled_unit_management_obj(scheduled_call):
"""
    Modify a scheduled unit management object.
    :param scheduled_call: scheduled unit management object
    :type scheduled_call: dict
    :return: updated scheduled unit management object
    :rtype: dict
"""
scheduled_call['options'] = scheduled_call['kwargs']['options']
scheduled_call['units'] = scheduled_call['kwargs']['units']
return scheduled_call
def expand_consumers(details, bindings, consumers):
"""
Expand a list of users based on the flag specified in the query parameters.
The _href is always added by the serialization function used.
Supported options:
details - include details
bindings - include bindings
:param details: if True, details will be included in the response
:type details: bool
:param bindings: if True, bindings will be included with each returned consumer
:type bindings: bool
:param consumers: A list of consumers
:type consumers: list
:return: A list of expanded consumers.
:rtype: list
"""
if details:
bindings = True
# add bindings
if bindings:
ids = [c['id'] for c in consumers]
manager = factory.consumer_bind_manager()
criteria = Criteria({'consumer_id': {'$in': ids}})
bindings = manager.find_by_criteria(criteria)
collated = {}
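        # Group the bindings by consumer id so that each consumer is
        # annotated only with its own bindings.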
for b in bindings:
lst = collated.setdefault(b['consumer_id'], [])
lst.append(b)
for c in consumers:
c['bindings'] = [
serial_binding.serialize(b, False) for b in collated.get(c['id'], [])
]
return consumers
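# A sketch of the expansion behaviour with hypothetical consumer ids:
#   expand_consumers(details=False, bindings=True,
#                    consumers=[{'id': 'c1'}, {'id': 'c2'}])
# looks up every binding for 'c1' and 'c2' in a single criteria query and
# attaches a serialized 'bindings' list to each dict; details=True implies
# bindings=True.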
class ConsumersView(View):
"""
View for consumers.
"""
@auth_required(authorization.READ)
def get(self, request):
"""
List the available consumers.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:return: Response containing a list of consumers
:rtype: django.http.HttpResponse
"""
query_params = request.GET
details = query_params.get('details', 'false').lower() == 'true'
bindings = query_params.get('bindings', 'false').lower() == 'true'
manager = factory.consumer_query_manager()
consumers = expand_consumers(details, bindings, manager.find_all())
for consumer in consumers:
add_link(consumer)
return generate_json_response_with_pulp_encoder(consumers)
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request):
"""
Create a consumer and return a serialized object containing the newly created consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:raises MissingValue: if ID is not provided
:return: Response containing the consumer
:rtype: django.http.HttpResponse
"""
params = request.body_as_json
consumer_id = params.get('id')
if consumer_id is None:
raise MissingValue(['id'])
display_name = params.get('display_name')
description = params.get('description')
notes = params.get('notes')
rsa_pub = params.get('rsa_pub')
manager = factory.consumer_manager()
consumer, certificate = manager.register(
consumer_id,
display_name=display_name,
description=description,
notes=notes,
rsa_pub=rsa_pub)
link = add_link(consumer)
document = {
'consumer': consumer,
'certificate': certificate
}
response = generate_json_response_with_pulp_encoder(document)
redirect_response = generate_redirect_response(response, link['_href'])
return redirect_response
class ConsumerResourceView(View):
"""
View for single consumer.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id):
"""
Return a serialized object representing the requested consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: id for the requested consumer
:type consumer_id: str
:return: Response containing data for the requested consumer
:rtype: django.http.HttpResponse
"""
query_params = request.GET
details = query_params.get('details', 'false').lower() == 'true'
bindings = query_params.get('bindings', 'false').lower() == 'true'
manager = factory.consumer_manager()
consumer = manager.get_consumer(consumer_id)
consumer = expand_consumers(details, bindings, [consumer])[0]
add_link(consumer)
return generate_json_response_with_pulp_encoder(consumer)
@auth_required(authorization.DELETE)
def delete(self, request, consumer_id):
"""
Delete a specified consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: id for the requested consumer
:type consumer_id: str
:return: An empty response
:rtype: django.http.HttpResponse
"""
manager = factory.consumer_manager()
response = manager.unregister(consumer_id)
return generate_json_response(response)
@auth_required(authorization.UPDATE)
@parse_json_body(json_type=dict)
def put(self, request, consumer_id):
"""
Update a specified consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: id for the requested consumer
:type consumer_id: str
:return: Response representing the updated consumer
:rtype: django.http.HttpResponse
"""
params = request.body_as_json
delta = params.get('delta')
manager = factory.consumer_manager()
consumer = manager.update(consumer_id, delta)
add_link(consumer)
return generate_json_response_with_pulp_encoder(consumer)
class ConsumerSearchView(search.SearchView):
"""
This view provides GET and POST searching for Consumers.
"""
optional_bool_fields = ('details', 'bindings')
response_builder = staticmethod(generate_json_response_with_pulp_encoder)
manager = query_manager.ConsumerQueryManager()
@classmethod
def get_results(cls, query, search_method, options, *args):
"""
This overrides the base class implementation so we can include optional information.
:param query: The criteria that should be used to search for objects
:type query: dict
:param search_method: function that should be used to search
:type search_method: func
:param options: additional options for including extra data. In this case, this can contain
only 'details' and 'bindings' as keys.
:type options: dict
:return: results, expanded and serialized
:rtype: list
"""
results = list(search_method(query))
results = expand_consumers(options.get('details', False),
options.get('bindings', False),
results)
for consumer in results:
add_link(consumer)
return results
class ConsumerBindingSearchView(search.SearchView):
"""
This view provides GET and POST searching for Consumer Bindings.
"""
response_builder = staticmethod(generate_json_response_with_pulp_encoder)
manager = bind.BindManager()
class ConsumerProfileSearchView(search.SearchView):
"""
This view provides GET and POST searching for Consumer Profiles.
"""
response_builder = staticmethod(generate_json_response_with_pulp_encoder)
manager = profile.ProfileManager()
class ConsumerRepoBindingView(View):
"""
View for bindings between consumer and repository.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id, repo_id=None):
"""
Fetch all bind objects referencing the specified consumer_id. Optionally,
specify a repo_id to fetch all bind objects for the consumer_id to the repo_id.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The specified consumer.
:type consumer_id: str
:param repo_id: The repository to retrieve bindings for (optional)
:type repo_id: str
:raises MissingResource: if some resource is missing
:return: Response representing the bindings
:rtype: django.http.HttpResponse
"""
# Check to make sure the resources exist
missing_resources = {}
if repo_id is not None:
repo = model.Repository.objects(repo_id=repo_id).first()
if repo is None:
missing_resources['repo_id'] = repo_id
# If get_consumer raises MissingResource we might miss reporting a bad repo_id
try:
factory.consumer_manager().get_consumer(consumer_id)
except MissingResource:
missing_resources['consumer_id'] = consumer_id
if missing_resources:
raise MissingResource(**missing_resources)
manager = factory.consumer_bind_manager()
bindings = manager.find_by_consumer(consumer_id, repo_id)
bindings = [serial_binding.serialize(b) for b in bindings]
return generate_json_response_with_pulp_encoder(bindings)
class ConsumerBindingsView(ConsumerRepoBindingView):
"""
View for Consumer bindings - represents the collection of
objects used to associate a consumer and a repo-distributor
association. Users wanting to create this association will
create an object in this collection. Both bind and unbind
are idempotent.
"""
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request, consumer_id):
"""
Create a bind association between the consumer identified by the
URL path and a repo-distributor specified in the POST body:
{repo_id:<str>, distributor_id:<str>}.
Designed to be idempotent, so only MissingResource is expected to
be raised by the manager.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: consumer to bind.
:type consumer_id: str
:raises OperationPostponed: will dispatch a task if 'notify_agent' is set to True
:raises InvalidValue: if binding_config is invalid
:return: Response representing the binding (when 'notify_agent' is False)
:rtype: django.http.HttpResponse
"""
# get other options and validate them
body = request.body_as_json
repo_id = body.get('repo_id')
distributor_id = body.get('distributor_id')
binding_config = body.get('binding_config', {})
options = body.get('options', {})
notify_agent = body.get('notify_agent', True)
if not isinstance(binding_config, dict):
raise InvalidValue(['binding_config'])
call_report = consumer_controller.bind(
consumer_id, repo_id, distributor_id, notify_agent, binding_config, options)
if call_report.spawned_tasks:
raise OperationPostponed(call_report)
else:
return generate_json_response_with_pulp_encoder(call_report.serialize())
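# Example POST body for the bind call above; the ids are hypothetical, and
# only repo_id and distributor_id are required (the rest use the defaults
# read in post()):
#   {"repo_id": "repo1", "distributor_id": "dist1",
#    "binding_config": {}, "options": {}, "notify_agent": false}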
class ConsumerBindingResourceView(View):
"""
Represents a specific bind resource.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id, repo_id, distributor_id):
"""
Fetch a specific bind object which represents a specific association
between a consumer and repo-distributor.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param repo_id: A repo ID.
:type repo_id: str
:param distributor_id: A distributor ID.
:type distributor_id: str
:return: Response representing the binding
:rtype: django.http.HttpResponse
"""
manager = factory.consumer_bind_manager()
bind = manager.get_bind(consumer_id, repo_id, distributor_id)
serialized_bind = serial_binding.serialize(bind)
return generate_json_response_with_pulp_encoder(serialized_bind)
@auth_required(authorization.DELETE)
@parse_json_body(allow_empty=True, json_type=dict)
def delete(self, request, consumer_id, repo_id, distributor_id):
"""
Delete a bind association between the specified
consumer and repo-distributor.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param repo_id: A repo ID.
:type repo_id: str
:param distributor_id: A distributor ID.
:type distributor_id: str
:raises OperationPostponed: will dispatch a task if 'notify_agent' is set to True
:raises InvalidValue: if some parameters are invalid
:return: Response representing the deleted binding (when 'notify_agent' is False)
:rtype: django.http.HttpResponse
"""
body = request.body_as_json
forced = body.get('force', False)
if not isinstance(forced, bool):
raise InvalidValue(['force'])
options = body.get('options', {})
if not isinstance(options, dict):
raise InvalidValue(['options'])
if forced:
call_report = consumer_controller.force_unbind(consumer_id, repo_id, distributor_id,
options)
else:
call_report = consumer_controller.unbind(consumer_id, repo_id, distributor_id, options)
if call_report.spawned_tasks:
raise OperationPostponed(call_report)
else:
return generate_json_response_with_pulp_encoder(call_report.serialize())
class ConsumerContentActionView(View):
"""
Views for content manipulation on the consumer.
"""
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request, consumer_id, action):
"""
Install, update, or uninstall content unit(s) on the consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param action: type of action to perform
:type action: str
:raises MissingResource: if consumer id does not exist
:raises MissingValue: if some required values are missing
:raises InvalidValue: if units are not a list of dictionaries
:return: bad request response if the action is not supported
:rtype: django.http.HttpResponseBadRequest
"""
method = getattr(self, action, None)
if method:
try:
factory.consumer_manager().get_consumer(consumer_id)
except MissingResource:
raise MissingResource(consumer_id=consumer_id)
else:
body = request.body_as_json
missing_params = []
units = body.get('units')
if units is None:
missing_params.append('units')
elif not isinstance(units, list):
raise InvalidValue('Units must be a list of dictionaries')
options = body.get('options')
if options is None:
missing_params.append('options')
if missing_params:
raise MissingValue(missing_params)
return method(request, consumer_id, units, options)
else:
return HttpResponseBadRequest('bad request')
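# Example body shared by the three actions below, following the expected
# format documented in install()/update()/uninstall(); the type_id and
# unit_key values are hypothetical:
#   {"units": [{"type_id": "rpm", "unit_key": {"name": "zsh"}}],
#    "options": {}}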
def install(self, request, consumer_id, units, options):
"""
Install content (units) on a consumer.
Expected body: {units:[], options:<dict>}
where unit is: {type_id:<str>, unit_key={}} and the
options is a dict of install options.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param units: units to install
:type units: list
:param options: install options
:type options: dict
:raises OperationPostponed: when an async operation is performed.
"""
agent_manager = factory.consumer_agent_manager()
task = agent_manager.install_content(consumer_id, units, options)
raise OperationPostponed(TaskResult.from_task_status_dict(task))
def update(self, request, consumer_id, units, options):
"""
Update content (units) on a consumer.
Expected body: {units:[], options:<dict>}
where unit is: {type_id:<str>, unit_key={}} and the
options is a dict of update options.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param units: units to update
:type units: list
:param options: update options
:type options: dict
:raises OperationPostponed: when an async operation is performed.
"""
agent_manager = factory.consumer_agent_manager()
task = agent_manager.update_content(consumer_id, units, options)
raise OperationPostponed(TaskResult.from_task_status_dict(task))
def uninstall(self, request, consumer_id, units, options):
"""
Uninstall content (units) on a consumer.
Expected body: {units:[], options:<dict>}
where unit is: {type_id:<str>, unit_key={}} and the
options is a dict of uninstall options.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param units: units to uninstall
:type units: list
:param options: uninstall options
:type options: dict
:raises OperationPostponed: when an async operation is performed.
"""
agent_manager = factory.consumer_agent_manager()
task = agent_manager.uninstall_content(consumer_id, units, options)
raise OperationPostponed(TaskResult.from_task_status_dict(task))
class ConsumerHistoryView(View):
"""
View for consumer history retrieval.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id):
"""
Retrieve the history for the specified consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:return: Response containing the consumer's history
:rtype: django.http.HttpResponse
"""
# Check that the consumer exists; raise a MissingResource exception if it doesn't.
factory.consumer_manager().get_consumer(consumer_id)
query_param = request.GET
filters = _ensure_input_encoding(query_param)
event_type = filters.get('event_type', None)
limit = filters.get('limit', None)
sort = filters.get('sort', 'descending')
start_date = filters.get('start_date', None)
end_date = filters.get('end_date', None)
if limit:
try:
limit = int(limit)
except ValueError:
raise InvalidValue('limit')
results = factory.consumer_history_manager().query(consumer_id=consumer_id,
event_type=event_type,
limit=limit,
sort=sort,
start_date=start_date,
end_date=end_date)
return generate_json_response_with_pulp_encoder(results)
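# Example query string for the history call above (the path is schematic
# and the filter values are hypothetical; every parameter is optional):
#   GET .../consumers/<consumer_id>/history/?event_type=consumer_registered&limit=5&sort=ascending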
class ConsumerProfilesView(View):
"""
View Consumer profiles represents the collection of
objects used to associate consumers and installed content
unit profiles.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id):
"""
Get all profiles associated with a consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:return: Response representing list of profiles
:rtype: django.http.HttpResponse
"""
# Check that the consumer exists; raise a MissingResource exception if it doesn't.
factory.consumer_manager().get_consumer(consumer_id)
manager = factory.consumer_profile_manager()
profiles = manager.get_profiles(consumer_id)
for consumer_profile in profiles:
add_link_profile(consumer_profile)
return generate_json_response_with_pulp_encoder(profiles)
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request, consumer_id):
"""
Associate a profile with a consumer by content type ID.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:raises MissingValue: if a required parameter was not provided
:return: Response representing the created profile
:rtype: django.http.HttpResponse
"""
body = request.body_as_json
content_type = body.get('content_type')
profile = body.get('profile')
if content_type is None:
    raise MissingValue('content_type')
manager = factory.consumer_profile_manager()
new_profile = manager.create(consumer_id, content_type, profile)
link = add_link_profile(new_profile)
response = generate_json_response_with_pulp_encoder(new_profile)
redirect_response = generate_redirect_response(response, link['_href'])
return redirect_response
class ConsumerProfileResourceView(View):
"""
View for a single consumer profile: the association between one
consumer and one installed content unit profile.
"""
@auth_required(authorization.READ)
def get(self, request, consumer_id, content_type):
"""
Get profile by content type associated with consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:param content_type: The content type
:type content_type: str
:return: Response representing consumer's profile
:rtype: django.http.HttpResponse
"""
manager = factory.consumer_profile_manager()
profile = manager.get_profile(consumer_id, content_type)
add_link_profile(profile)
return generate_json_response_with_pulp_encoder(profile)
@auth_required(authorization.UPDATE)
@parse_json_body(json_type=dict)
def put(self, request, consumer_id, content_type):
"""
Update the association of a profile with a consumer by content type ID.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param content_type: A content unit type ID.
:type content_type: str
:return: Response representing the updated profile
:rtype: django.http.HttpResponse
"""
body = request.body_as_json
profile = body.get('profile')
manager = factory.consumer_profile_manager()
consumer = manager.update(consumer_id, content_type, profile)
add_link_profile(consumer)
return generate_json_response_with_pulp_encoder(consumer)
@auth_required(authorization.DELETE)
def delete(self, request, consumer_id, content_type):
"""
Delete an association between the specified
consumer and profile. Designed to be idempotent.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: A consumer ID.
:type consumer_id: str
:param content_type: The content type ID.
:type content_type: str
:return: An empty response
:rtype: django.http.HttpResponse
"""
manager = factory.consumer_profile_manager()
response = manager.delete(consumer_id, content_type)
return generate_json_response(response)
class ConsumerContentApplicabilityView(View):
"""
View for query content applicability.
"""
@auth_required(authorization.READ)
@parse_json_body(json_type=dict)
def post(self, request):
"""
Query content applicability for a given consumer criteria query.
body {criteria: <object>,
content_types: <array>[optional]}
This method returns a JSON document containing an array of objects that each have two
keys: 'consumers', and 'applicability'. 'consumers' will index an array of consumer_ids,
for consumers that have the same repository bindings and profiles. 'applicability' will
index an object that will have keys for each content type that is applicable, and the
content type ids will index the applicability data for those content types. For example,
[{'consumers': ['consumer_1', 'consumer_2'],
'applicability': {'content_type_1': ['unit_1', 'unit_3']}},
{'consumers': ['consumer_2', 'consumer_3'],
'applicability': {'content_type_1': ['unit_1', 'unit_2']}}]
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:return: Response containing applicability data matching the consumer criteria query
:rtype: django.http.HttpResponse
"""
# Resolve the requester's consumer criteria and optional content types, then
# retrieve the applicability data for the consumers matching that criteria.
try:
consumer_criteria = self._get_consumer_criteria(request)
content_types = self._get_content_types(request)
except InvalidValue, e:
return HttpResponseBadRequest(str(e))
response = retrieve_consumer_applicability(consumer_criteria, content_types)
return generate_json_response_with_pulp_encoder(response)
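# Example request body for the applicability query above, matching the
# format described in the docstring; the content type id 'rpm' is
# hypothetical:
#   {"criteria": {"filters": {"id": {"$in": ["consumer_1", "consumer_2"]}}},
#    "content_types": ["rpm"]}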
def _get_consumer_criteria(self, request):
"""
Process the POST data, find the criteria given by the user, and resolve it
to a Criteria object.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:raises InvalidValue: if some parameters were invalid
:return: A Criteria object
:rtype: pulp.server.db.model.criteria.Criteria
"""
body = request.body_as_json
consumer_criteria = body.get('criteria')
if consumer_criteria is None:
raise InvalidValue("The input to this method must be a JSON object with a "
"'criteria' key.")
consumer_criteria = Criteria.from_client_input(consumer_criteria)
return consumer_criteria
def _get_content_types(self, request):
"""
Get the list of content_types that the caller wishes to limit the response to. If the
caller did not include content types, this will return None.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:raises InvalidValue: if some parameters were invalid
:return: The list of content_types that the applicability query should be limited to,
or None if not specified
:rtype: list or None
"""
body = request.body_as_json
content_types = body.get('content_types', None)
if content_types is not None and not isinstance(content_types, list):
raise InvalidValue('content_types must index an array.')
return content_types
class ConsumerContentApplicRegenerationView(View):
"""
Content applicability regeneration for updated consumers.
"""
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request):
"""
Creates an async task to regenerate content applicability data for given consumers.
body {consumer_criteria:<dict>}
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:raises MissingValue: if some parameters are missing
:raises InvalidValue: if some parameters are invalid
:raises OperationPostponed: when an async operation is performed.
"""
body = request.body_as_json
consumer_criteria = body.get('consumer_criteria', None)
if consumer_criteria is None:
raise MissingValue('consumer_criteria')
try:
consumer_criteria = Criteria.from_client_input(consumer_criteria)
except Exception:
raise InvalidValue('consumer_criteria')
task_tags = [tags.action_tag('content_applicability_regeneration')]
async_result = regenerate_applicability_for_consumers.apply_async_with_reservation(
tags.RESOURCE_REPOSITORY_PROFILE_APPLICABILITY_TYPE, tags.RESOURCE_ANY_ID,
(consumer_criteria.as_dict(),), tags=task_tags)
raise OperationPostponed(async_result)
class ConsumerResourceContentApplicRegenerationView(View):
"""
View Content applicability regeneration for a given consumer.
"""
@auth_required(authorization.CREATE)
@parse_json_body(allow_empty=True)
def post(self, request, consumer_id):
"""
Creates an async task to regenerate content applicability data for given consumer.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:raises MissingResource: if the consumer does not exist
:raises OperationPostponed: when an async operation is performed.
"""
consumer_query_manager = factory.consumer_query_manager()
if consumer_query_manager.find_by_id(consumer_id) is None:
raise MissingResource(consumer_id=consumer_id)
consumer_criteria = Criteria(filters={'consumer_id': consumer_id})
task_tags = [tags.action_tag('consumer_content_applicability_regeneration')]
async_result = regenerate_applicability_for_consumers.apply_async_with_reservation(
tags.RESOURCE_CONSUMER_TYPE,
consumer_id,
(consumer_criteria.as_dict(),),
tags=task_tags)
raise OperationPostponed(async_result)
class ConsumerUnitActionSchedulesView(View):
"""
View for scheduled content manipulation on the consumer.
"""
ACTION = None
def __init__(self):
super(ConsumerUnitActionSchedulesView, self).__init__()
self.manager = factory.consumer_schedule_manager()
@auth_required(authorization.READ)
def get(self, request, consumer_id):
"""
List the consumer's <action> schedules.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:raises MissingResource: if consumer does not exist
:return: Response containing the consumer's <action> schedules
:rtype: django.http.HttpResponse
"""
try:
factory.consumer_manager().get_consumer(consumer_id)
except MissingResource:
raise MissingResource(consumer_id=consumer_id)
schedules = self.manager.get(consumer_id, self.ACTION)
schedule_objs = []
for schedule in schedules:
obj = scheduled_unit_management_obj(schedule.for_display())
add_link_schedule(obj, self.ACTION, consumer_id)
schedule_objs.append(obj)
return generate_json_response_with_pulp_encoder(schedule_objs)
@auth_required(authorization.CREATE)
@parse_json_body(json_type=dict)
def post(self, request, consumer_id):
"""
Create a schedule.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:raises UnsupportedValue: if some extra unsupported keys were specified.
:return: Response containing the newly created schedule
:rtype: django.http.HttpResponse
"""
params = request.body_as_json
units = params.pop('units', None)
options = params.pop('options', {})
schedule = params.pop('schedule', None)
failure_threshold = params.pop('failure_threshold', None)
enabled = params.pop('enabled', True)
if params:
raise UnsupportedValue(params.keys())
scheduled_call = self.manager.create_schedule(
self.ACTION, consumer_id, units, options, schedule, failure_threshold, enabled)
scheduled_obj = scheduled_unit_management_obj(scheduled_call.for_display())
link = add_link_schedule(scheduled_obj, self.ACTION, consumer_id)
response = generate_json_response_with_pulp_encoder(scheduled_obj)
redirect_response = generate_redirect_response(response, link['_href'])
return redirect_response
class ConsumerUnitActionScheduleResourceView(View):
"""
View for a single scheduled consumer unit action.
"""
ACTION = None
def __init__(self):
super(ConsumerUnitActionScheduleResourceView, self).__init__()
self.manager = factory.consumer_schedule_manager()
@auth_required(authorization.READ)
def get(self, request, consumer_id, schedule_id):
"""
Retrieve a specific <action> schedule.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:param schedule_id: the schedule id
:type schedule_id: str
:raises MissingResource: if consumer/schedule does not exist
:return: Response containing the consumer's <action> schedule
:rtype: django.http.HttpResponse
"""
scheduled_call = None
for call in self.manager.get(consumer_id, self.ACTION):
if call.id == schedule_id:
scheduled_call = call
break
if scheduled_call is None:
raise MissingResource(consumer_id=consumer_id, schedule_id=schedule_id)
scheduled_obj = scheduled_unit_management_obj(scheduled_call.for_display())
add_link_schedule(scheduled_obj, self.ACTION, consumer_id)
return generate_json_response_with_pulp_encoder(scheduled_obj)
@auth_required(authorization.UPDATE)
@parse_json_body(allow_empty=True, json_type=dict)
def put(self, request, consumer_id, schedule_id):
"""
Update a specific <action> schedule.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:param schedule_id: the schedule id
:type schedule_id: str
:return: Response containing the consumer's updated <action> schedule
:rtype: django.http.HttpResponse
"""
schedule_data = request.body_as_json
options = schedule_data.pop('options', None)
units = schedule_data.pop('units', None)
if 'schedule' in schedule_data:
schedule_data['iso_schedule'] = schedule_data.pop('schedule')
schedule = self.manager.update_schedule(consumer_id, schedule_id, units,
options, schedule_data)
scheduled_obj = scheduled_unit_management_obj(schedule.for_display())
add_link_schedule(scheduled_obj, self.ACTION, consumer_id)
return generate_json_response_with_pulp_encoder(scheduled_obj)
@auth_required(authorization.DELETE)
def delete(self, request, consumer_id, schedule_id):
"""
Delete a specific <action> schedule.
:param request: WSGI request object
:type request: django.core.handlers.wsgi.WSGIRequest
:param consumer_id: The consumer ID.
:type consumer_id: str
:param schedule_id: the schedule id
:type schedule_id: str
:return: An empty response
:rtype: django.http.HttpResponse
"""
self.manager.delete_schedule(consumer_id, schedule_id)
return generate_json_response(None)
class UnitInstallSchedulesView(ConsumerUnitActionSchedulesView):
"""
View for scheduled content install on the consumer.
"""
ACTION = UNIT_INSTALL_ACTION
class UnitInstallScheduleResourceView(ConsumerUnitActionScheduleResourceView):
"""
View for a single scheduled consumer unit install.
"""
ACTION = UNIT_INSTALL_ACTION
class UnitUpdateSchedulesView(ConsumerUnitActionSchedulesView):
"""
View for scheduled content update on the consumer.
"""
ACTION = UNIT_UPDATE_ACTION
class UnitUpdateScheduleResourceView(ConsumerUnitActionScheduleResourceView):
"""
View for a single scheduled consumer unit update.
"""
ACTION = UNIT_UPDATE_ACTION
class UnitUninstallSchedulesView(ConsumerUnitActionSchedulesView):
"""
View for scheduled content uninstall on the consumer.
"""
ACTION = UNIT_UNINSTALL_ACTION
class UnitUninstallScheduleResourceView(ConsumerUnitActionScheduleResourceView):
"""
View for a single scheduled consumer unit uninstall.
"""
ACTION = UNIT_UNINSTALL_ACTION
|
import jmri.jmrit.roster
rosterlist = jmri.jmrit.roster.Roster.instance().matchingList(None, None, None, None, None, None, None)
for entry in rosterlist.toArray():
print entry.getId(), entry.getDccAddress(), entry.isLongAddress()
|
import math
from bisect import insort
class CacheItem(object):
__slots__ = ('oid', 'tid', 'next_tid', 'data',
'counter', 'level', 'expire',
'prev', 'next')
def __repr__(self):
s = ''
for attr in self.__slots__:
try:
value = getattr(self, attr)
if value:
if attr in ('prev', 'next'):
s += ' %s=<...>' % attr
continue
elif attr == 'data':
value = '...'
s += ' %s=%r' % (attr, value)
except AttributeError:
pass
return '<%s%s>' % (self.__class__.__name__, s)
def __lt__(self, other):
return self.tid < other.tid
class ClientCache(object):
"""In-memory pickle cache based on Multi-Queue cache algorithm
Multi-Queue algorithm for Second Level Buffer Caches:
http://www.usenix.org/event/usenix01/full_papers/zhou/zhou_html/index.html
Quick description:
- There are multiple "regular" queues, plus a history queue
- The queue to store an object in depends on its access frequency
- The queue an object is in defines its lifespan (higher-index queue eq.
longer lifespan)
-> The more often an object is accessed, the higher lifespan it will
have
- Upon cache or history hit, object frequency is increased and object
might get moved to longer-lived queue
- Each access "ages" objects in cache, and an aging object is moved to
shorter-lived queue as it ages without being accessed, or in the
history queue if it's really too old.
- The history queue only contains items with counter > 0
"""
__slots__ = ('_life_time', '_max_history_size', '_max_size',
'_queue_list', '_oid_dict', '_time', '_size', '_history_size')
def __init__(self, life_time=10000, max_history_size=100000,
max_size=20*1024*1024):
self._life_time = life_time
self._max_history_size = max_history_size
self._max_size = max_size
self.clear()
def clear(self):
"""Reset cache"""
self._queue_list = [None] # first is history
self._oid_dict = {}
self._time = 0
self._size = 0
self._history_size = 0
def __repr__(self):
return ("<%s history_size=%s oid_count=%s size=%s time=%s"
" queue_length=%r (life_time=%s max_history_size=%s"
" max_size=%s)>") % (
self.__class__.__name__, self._history_size,
len(self._oid_dict), self._size, self._time,
[sum(1 for _ in self._iterQueue(x))
for x in xrange(len(self._queue_list))],
self._life_time, self._max_history_size, self._max_size)
def _iterQueue(self, level):
"""for debugging purpose"""
if level < len(self._queue_list):
item = head = self._queue_list[level]
if item:
while 1:
yield item
item = item.next
if item is head:
break
def _add(self, item):
level = item.level
try:
head = self._queue_list[level]
except IndexError:
assert len(self._queue_list) == level
self._queue_list.append(item)
item.prev = item.next = item
else:
if head:
item.prev = tail = head.prev
tail.next = head.prev = item
item.next = head
else:
self._queue_list[level] = item
item.prev = item.next = item
if level:
item.expire = self._time + self._life_time
else:
self._size -= len(item.data)
item.data = None
self._history_size += 1
if self._max_history_size < self._history_size:
self._remove(head)
item_list = self._oid_dict[head.oid]
item_list.remove(head)
if not item_list:
del self._oid_dict[head.oid]
def _remove(self, item):
level = item.level
if level is not None:
if level:
item.level = level - 1
else:
self._history_size -= 1
next = item.next
if next is item:
self._queue_list[level] = next = None
else:
item.prev.next = next
next.prev = item.prev
if self._queue_list[level] is item:
self._queue_list[level] = next
return next
def _fetched(self, item, _log=math.log):
self._remove(item)
item.counter = counter = item.counter + 1
# XXX It might be better to adjust the level according to the object
# size. See commented factor for example.
item.level = 1 + int(_log(counter, 2)
# * (1.01 - float(len(item.data)) / self._max_size)
)
self._add(item)
self._time = time = self._time + 1
for head in self._queue_list[1:]:
if head and head.expire < time:
self._remove(head)
if head.level or head.counter:
self._add(head)
else:
self._oid_dict[head.oid].remove(head)
break
def _load(self, oid, before_tid=None):
item_list = self._oid_dict.get(oid)
if item_list:
if before_tid:
for item in reversed(item_list):
if item.tid < before_tid:
next_tid = item.next_tid
if next_tid and next_tid < before_tid:
break
return item
else:
item = item_list[-1]
if not item.next_tid:
return item
def load(self, oid, before_tid=None):
"""Return a revision of oid that was current before given tid"""
item = self._load(oid, before_tid)
if item:
data = item.data
if data is not None:
self._fetched(item)
return data, item.tid, item.next_tid
def store(self, oid, data, tid, next_tid):
"""Store a new data record in the cache"""
size = len(data)
max_size = self._max_size
if size < max_size:
item = self._load(oid, next_tid)
if item:
# We don't handle late invalidations for cached oids, because
# the caller is not supposed to explicitly ask for tids after
# app.last_tid (and the cache should be empty when app.last_tid
# is still None).
assert item.tid == tid, (item, tid)
if item.level: # already stored
assert item.next_tid == next_tid and item.data == data
return
assert not item.data
# Possible case of late invalidation.
item.next_tid = next_tid
else:
item = CacheItem()
item.oid = oid
item.tid = tid
item.next_tid = next_tid
item.counter = 0
item.level = None
try:
item_list = self._oid_dict[oid]
except KeyError:
self._oid_dict[oid] = [item]
else:
if next_tid:
insort(item_list, item)
else:
prev = item_list[-1]
assert prev.next_tid <= tid, (prev, item)
item.counter = prev.counter
if prev.level:
prev.counter = 0
if prev.level > 1:
self._fetched(prev)
item_list.append(item)
else:
self._remove(prev)
item_list[-1] = item
item.data = data
self._fetched(item)
self._size += size
if max_size < self._size:
for head in self._queue_list[1:]:
while head:
next = self._remove(head)
if head.counter:
head.level = 0
self._add(head)
else:
self._oid_dict[head.oid].remove(head)
if self._size <= max_size:
return
head = next
def invalidate(self, oid, tid):
"""Mark data record as being valid only up to given tid"""
try:
item = self._oid_dict[oid][-1]
except KeyError:
pass
else:
if item.next_tid is None:
item.next_tid = tid
else:
assert item.next_tid <= tid, (item, oid, tid)
def clear_current(self):
for oid, item_list in self._oid_dict.items():
item = item_list[-1]
if item.next_tid is None:
self._remove(item)
del item_list[-1]
# We don't preserve statistics of removed items. This could be
# done easily when previous versions are cached, by copying
# counters, but it would not be fair for other oids, so it's
# probably not worth it.
if not item_list:
del self._oid_dict[oid]
def test(self):
cache = ClientCache()
repr(cache)
self.assertEqual(cache.load(1, 10), None)
self.assertEqual(cache.load(1, None), None)
cache.invalidate(1, 10)
data = '5', 5, 10
# 2 identical stores happen if 2 threads get a cache miss at the same time
cache.store(1, *data)
cache.store(1, *data)
self.assertEqual(cache.load(1, 10), data)
self.assertEqual(cache.load(1, None), None)
data = '15', 15, None
cache.store(1, *data)
self.assertEqual(cache.load(1, None), data)
cache.clear_current()
self.assertEqual(cache.load(1, None), None)
cache.store(1, *data)
cache.invalidate(1, 20)
cache.clear_current()
self.assertEqual(cache.load(1, 20), ('15', 15, 20))
cache.store(1, '10', 10, 15)
cache.store(1, '20', 20, 21)
self.assertEqual([5, 10, 15, 20], [x.tid for x in cache._oid_dict[1]])
self.assertRaises(AssertionError, cache.store, 1, '20', 20, None)
repr(cache)
# Test late invalidations.
cache.clear()
cache.store(1, '10*', 10, None)
cache._max_size = cache._size
cache.store(2, '10', 10, None)
self.assertEqual(cache._queue_list[0].oid, 1)
data = '10', 10, 15
cache.store(1, *data)
self.assertEqual(cache.load(1, 15), data)
self.assertEqual(1, cache._history_size)
cache.clear_current()
self.assertEqual(0, cache._history_size)
if __name__ == '__main__':
import unittest
unittest.TextTestRunner().run(type('', (unittest.TestCase,), {
'runTest': test})())
|
""" netlogon DCE/RPC """
import dcerpc as __dcerpc
import talloc as __talloc
class netr_NETLOGON_INFO_4(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
trusted_dc_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
trusted_domain_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
import subprocess
import shutil
import errno
import sys
import os
import re
import logging
from virtconv import _gettext as _
DISK_FORMAT_NONE = 0
DISK_FORMAT_RAW = 1
DISK_FORMAT_VMDK = 2
DISK_FORMAT_VDISK = 3
DISK_FORMAT_QCOW = 4
DISK_FORMAT_QCOW2 = 5
DISK_FORMAT_COW = 6
DISK_TYPE_DISK = 0
DISK_TYPE_CDROM = 1
DISK_TYPE_ISO = 2
CSUM_SHA1 = 0
CSUM_SHA256 = 1
disk_suffixes = {
DISK_FORMAT_RAW: ".raw",
DISK_FORMAT_VMDK: ".vmdk",
DISK_FORMAT_VDISK: ".vdisk",
DISK_FORMAT_QCOW: ".qcow",
DISK_FORMAT_QCOW2: ".qcow2",
DISK_FORMAT_COW: ".cow",
}
qemu_formats = {
DISK_FORMAT_RAW: "raw",
DISK_FORMAT_VMDK: "vmdk",
DISK_FORMAT_VDISK: "vdisk",
DISK_FORMAT_QCOW: "qcow",
DISK_FORMAT_QCOW2: "qcow2",
DISK_FORMAT_COW: "cow",
}
disk_format_names = {
"none": DISK_FORMAT_NONE,
"raw": DISK_FORMAT_RAW,
"vmdk": DISK_FORMAT_VMDK,
"vdisk": DISK_FORMAT_VDISK,
"qcow": DISK_FORMAT_QCOW,
"qcow2": DISK_FORMAT_QCOW2,
"cow": DISK_FORMAT_COW,
}
checksum_types = {
CSUM_SHA1 : "sha1",
CSUM_SHA256 : "sha256",
}
def ensuredirs(path):
"""
Make sure that all the containing directories of the given file
path exist.
"""
try:
os.makedirs(os.path.dirname(path))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def run_cmd(cmd):
"""
Return the exit status and the lines written to stdout and stderr.
"""
logging.debug("Running command: %s" % " ".join(cmd))
proc = subprocess.Popen(cmd, stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
ret = proc.wait()
return ret, proc.stdout.readlines(), proc.stderr.readlines()
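# Hedged usage sketch (the command itself is arbitrary):
#   ret, out_lines, err_lines = run_cmd(["qemu-img", "--help"])
# stdout and stderr come back as lists of lines because of readlines().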
def run_vdiskadm(args):
"""Run vdiskadm, returning the output."""
ret, stdout, stderr = run_cmd([ "/usr/sbin/vdiskadm" ] + args)
if ret != 0:
raise RuntimeError("Disk conversion failed with "
"exit status %d: %s" % (ret, "".join(stderr)))
if len(stderr):
print >> sys.stderr, stderr
return stdout
class disk(object):
"""Definition of an individual disk instance."""
def __init__(self, path=None, format=DISK_FORMAT_NONE, bus="ide",
type=DISK_TYPE_DISK):
self.path = path
self.format = format
self.bus = bus
self.type = type
self.clean = []
self.csum_dict = {}
def cleanup(self):
"""
Remove any generated output.
"""
for path in self.clean:
if os.path.isfile(path):
os.remove(path)
if os.path.isdir(path):
os.removedirs(path)
self.clean = []
def copy_file(self, infile, outfile):
"""Copy an individual file."""
self.clean += [ outfile ]
ensuredirs(outfile)
shutil.copy(infile, outfile)
def out_file(self, out_format):
"""Return the relative path of the output file."""
if not out_format:
return self.path
relout = self.path.replace(disk_suffixes[self.format],
disk_suffixes[out_format])
return re.sub(r'\s', '_', relout)
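# Worked example of the suffix swap above, with a hypothetical path:
# converting "My Disk.vmdk" (DISK_FORMAT_VMDK) to DISK_FORMAT_RAW first
# yields "My Disk.raw", then the whitespace substitution gives "My_Disk.raw".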
def vdisk_convert(self, absin, absout):
"""
Import the given disk into vdisk, including any sub-files as
necessary.
"""
stdout = run_vdiskadm([ "import", "-fnp", absin, absout ])
for item in stdout:
ignore, path = item.strip().split(':', 1)
self.clean += [ os.path.join(absout, path) ]
run_vdiskadm([ "import", "-fp", absin, absout ])
def qemu_convert(self, absin, absout, out_format):
"""
Use qemu-img to convert the given disk. Note that at least some
versions of qemu-img cannot handle multi-file VMDKs, so this can
easily go wrong.
Gentoo, Debian, and Ubuntu (potentially others) install kvm-img
with kvm and qemu-img with qemu. Both would work.
"""
self.clean += [ absout ]
ret, ignore, stderr = run_cmd(["qemu-img", "convert", "-O",
qemu_formats[out_format], absin, absout])
if ret == 127:
ret, ignore, stderr = run_cmd(["kvm-img", "convert", "-O",
qemu_formats[out_format], absin, absout])
if ret != 0:
raise RuntimeError("Disk conversion failed with "
"exit status %d: %s" % (ret, "".join(stderr)))
if len(stderr):
print >> sys.stderr, stderr
def copy(self, indir, outdir, out_format):
"""
If needed, copy top-level disk files to outdir. If the copy is
done, then self.path is updated as needed.
Returns (input_in_outdir, need_conversion)
"""
need_conversion = (out_format != DISK_FORMAT_NONE and
self.format != out_format)
if os.path.isabs(self.path):
return True, need_conversion
relin = self.path
absin = os.path.join(indir, relin)
relout = self.out_file(self.format)
absout = os.path.join(outdir, relout)
#
# If we're going to use vdiskadm, it's much smarter; don't
# attempt any copies.
#
if out_format == DISK_FORMAT_VDISK:
return False, True
#
# If we're using the same directory, just account for any spaces
# in the disk filename and we're done.
#
if indir == outdir:
if relin != relout:
# vdisks cannot have spaces
if self.format == DISK_FORMAT_VDISK:
raise RuntimeError("Disk conversion failed: "
"invalid vdisk '%s'" % self.path)
self.clean += [ absout ]
self.copy_file(absin, absout)
self.path = relout
return True, need_conversion
#
# If we're not performing any conversion, just copy the file.
# XXX: This can go wrong for multi-part disks!
#
if not need_conversion:
self.clean += [ absout ]
self.copy_file(absin, absout)
self.path = relout
return True, False
#
# We're doing a conversion step, so we can rely upon convert()
# to place something in outdir.
#
return False, True
def convert(self, indir, outdir, output_format):
"""
Convert a disk into the requested format if possible, in the
given output directory. Raises RuntimeError or other failures.
"""
if self.type != DISK_TYPE_DISK:
return
out_format = disk_format_names[output_format]
if not (out_format == DISK_FORMAT_NONE or
out_format == DISK_FORMAT_VDISK or
out_format == DISK_FORMAT_RAW or
out_format == DISK_FORMAT_VMDK or
out_format == DISK_FORMAT_QCOW or
out_format == DISK_FORMAT_QCOW2 or
out_format == DISK_FORMAT_COW):
raise NotImplementedError(_("Cannot convert to disk format %s") %
output_format)
indir = os.path.normpath(os.path.abspath(indir))
outdir = os.path.normpath(os.path.abspath(outdir))
input_in_outdir, need_conversion = self.copy(indir, outdir, out_format)
if not need_conversion:
assert(input_in_outdir)
return
if os.path.isabs(self.path):
raise NotImplementedError(_("Cannot convert disk with absolute"
" path %s") % self.path)
if input_in_outdir:
indir = outdir
relin = self.path
absin = os.path.join(indir, relin)
relout = self.out_file(out_format)
absout = os.path.join(outdir, relout)
ensuredirs(absout)
if os.getenv("VIRTCONV_TEST_NO_DISK_CONVERSION"):
self.format = out_format
self.path = self.out_file(self.format)
return
if out_format == DISK_FORMAT_VDISK:
self.vdisk_convert(absin, absout)
else:
self.qemu_convert(absin, absout, out_format)
self.format = out_format
self.path = relout
def disk_formats():
"""
Return a list of supported disk formats.
"""
return disk_format_names.keys()
|
import os
from multiprocessing import Process, Queue
from Queue import Empty
import gettext
_ = gettext.translation('yali', fallback=True).ugettext
from PyQt4.Qt import QWidget, SIGNAL, QPixmap, QObject, QTimer, QMutex, QWaitCondition
import pisi.ui
import yali.util
import yali.pisiiface
import yali.postinstall
import yali.context as ctx
from yali.gui import ScreenWidget
from yali.gui.Ui.installwidget import Ui_InstallWidget
from yali.gui.Ui.installprogress import Ui_InstallProgress
from pds.gui import PAbstractBox, BOTCENTER
EventConfigure, EventInstall, EventSetProgress, EventError, EventAllFinished, EventPackageInstallFinished, EventRetry = range(1001, 1008)
class InstallProgressWidget(PAbstractBox):
def __init__(self, parent):
PAbstractBox.__init__(self, parent)
self.ui = Ui_InstallProgress()
self.ui.setupUi(self)
self._animation = 2
self._duration = 500
def showInstallProgress(self):
QTimer.singleShot(1, lambda: self.animate(start = BOTCENTER, stop = BOTCENTER))
"""
def hideHelp(self):
self.animate(start = CURRENT,
stop = TOPCENTER,
direction = OUT)
def toggleHelp(self):
if self.isVisible():
self.hideHelp()
else:
self.showHelp()
def setHelp(self, help):
self.ui.helpContent.hide()
self.ui.helpContent2.setText(help)
# self.resize(QSize(1,1))
QTimer.singleShot(1, self.adjustSize)
"""
def iter_slideshows():
slideshows = []
release_file = os.path.join(ctx.consts.branding_dir, ctx.flags.branding, ctx.consts.release_file)
slideshows_content = yali.util.parse_branding_slideshows(release_file)
for content in slideshows_content:
slideshows.append({"picture":QPixmap(os.path.join(ctx.consts.branding_dir,
ctx.flags.branding,
ctx.consts.slideshows_dir,
content[0])), "description":content[1]})
while True:
for slideshow in slideshows:
yield slideshow
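# The generator above cycles through the parsed slideshows indefinitely, so
# the UI can keep calling .next() on it; it behaves like itertools.cycle
# over {"picture": QPixmap, "description": dict} entries.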
class Widget(QWidget, ScreenWidget):
name = "packageInstallation"
def __init__(self):
QWidget.__init__(self)
self.ui = Ui_InstallWidget()
self.ui.setupUi(self)
self.installProgress = InstallProgressWidget(self)
self.timer = QTimer(self)
QObject.connect(self.timer, SIGNAL("timeout()"), self.changeSlideshows)
self.poll_timer = QTimer(self)
QObject.connect(self.poll_timer, SIGNAL("timeout()"), self.checkQueueEvent)
if ctx.consts.lang == "tr":
self.installProgress.ui.progress.setFormat("%%p")
self.iter_slideshows = iter_slideshows()
# show first pic
self.changeSlideshows()
self.total = 0
self.cur = 0
self.has_errors = False
# mutual exclusion
self.mutex = None
self.wait_condition = None
self.queue = None
self.retry_answer = False
self.pkg_configurator = None
self.pkg_installer = None
def shown(self):
# Disable mouse handler
ctx.mainScreen.dontAskCmbAgain = True
ctx.mainScreen.theme_shortcut.setEnabled(False)
ctx.mainScreen.ui.system_menu.setEnabled(False)
# start installer thread
ctx.logger.debug("PkgInstaller is creating...")
self.mutex = QMutex()
self.wait_condition = QWaitCondition()
self.queue = Queue()
self.pkg_installer = PkgInstaller(self.queue, self.mutex, self.wait_condition, self.retry_answer)
self.poll_timer.start(500)
# start the installer process
ctx.logger.debug("Calling PkgInstaller.start...")
self.pkg_installer.start()
ctx.mainScreen.disableNext()
ctx.mainScreen.disableBack()
# change the slideshow every 30 seconds
self.timer.start(1000 * 30)
self.installProgress.showInstallProgress()
def checkQueueEvent(self):
while True:
try:
data = self.queue.get_nowait()
event = data[0]
except Empty, msg:
return
ctx.logger.debug("checkQueueEvent: Processing %s event..." % event)
# EventInstall
if event == EventInstall:
package = data[1]
self.installProgress.ui.info.setText(_("Installing <b>%(name)s</b> -- %(summary)s") % {"name":package.name,
"summary":package.summary})
ctx.logger.debug("Pisi: %s installing" % package.name)
self.cur += 1
self.installProgress.ui.progress.setValue(self.cur)
# EventConfigure
elif event == EventConfigure:
package = data[1]
self.installProgress.ui.info.setText(_("Configuring <b>%s</b>") % package.name)
ctx.logger.debug("Pisi: %s configuring" % package.name)
self.cur += 1
self.installProgress.ui.progress.setValue(self.cur)
# EventSetProgress
elif event == EventSetProgress:
total = data[1]
self.installProgress.ui.progress.setMaximum(total)
# EventPackageInstallFinished
elif event == EventPackageInstallFinished:
print "***EventPackageInstallFinished called...."
self.packageInstallFinished()
# EventError
elif event == EventError:
err = data[1]
self.installError(err)
# EventRetry
elif event == EventRetry:
package = os.path.basename(data[1])
self.timer.stop()
self.poll_timer.stop()
rc = ctx.interface.messageWindow(_("Warning"),
_("Following error occured while "
"installing packages:"
"<b>%s</b><br><br>"
"Do you want to retry?")
% package,
type="custom", customIcon="warning",
customButtons=[_("Yes"), _("No")])
self.retry_answer = not rc
self.timer.start(1000 * 30)
self.poll_timer.start(500)
self.wait_condition.wakeAll()
# EventAllFinished
elif event == EventAllFinished:
self.finished()
def changeSlideshows(self):
slide = self.iter_slideshows.next()
self.ui.slideImage.setPixmap(slide["picture"])
if slide["description"].has_key(ctx.consts.lang):
description = slide["description"][ctx.consts.lang]
else:
description = slide["description"]["en"]
self.ui.slideText.setText(description)
def packageInstallFinished(self):
yali.postinstall.writeFstab()
# Configure Pending...
# run baselayout's postinstall first
yali.postinstall.initbaselayout()
# postscripts depend on 03locale...
yali.util.writeLocaleFromCmdline()
#Write InitramfsConf
yali.postinstall.writeInitramfsConf()
# run dbus in chroot
yali.util.start_dbus()
# start configurator thread
self.pkg_configurator = PkgConfigurator(self.queue, self.mutex)
self.pkg_configurator.start()
def execute(self):
# stop slide show
self.timer.stop()
self.poll_timer.stop()
return True
def finished(self):
self.poll_timer.stop()
if self.has_errors:
return
ctx.mainScreen.slotNext()
def installError(self, error):
self.has_errors = True
errorstr = _("""An error occured during the installation of packages.
This may be caused by a corrupted installation medium error:
%s
""") % str(error)
ctx.interface.exceptionWindow(error, errorstr)
ctx.logger.error("Package installation failed error with:%s" % error)
class PkgInstaller(Process):
def __init__(self, queue, mutex, wait_condition, retry_answer):
Process.__init__(self)
self.queue = queue
self.mutex = mutex
self.wait_condition = wait_condition
self.retry_answer = retry_answer
ctx.logger.debug("PkgInstaller started.")
def run(self):
ctx.logger.debug("PkgInstaller is running.")
ui = PisiUI(self.queue)
ctx.logger.debug("PisiUI is creating..")
yali.pisiiface.initialize(ui)
ctx.logger.debug("Pisi initialize is calling..")
if ctx.flags.collection:
ctx.logger.debug("Collection Repo added.")
yali.pisiiface.addRepo(ctx.consts.collection_repo_name, ctx.installData.autoCollection.index)
else:
ctx.logger.debug("CD Repo adding.")
yali.pisiiface.addCdRepo()
# show progress
total = len(ctx.packagesToInstall)
ctx.logger.debug("Sending EventSetProgress")
data = [EventSetProgress, total*2]
self.queue.put_nowait(data)
ctx.logger.debug("Found %d packages in repo.." % total)
try:
while True:
try:
yali.pisiiface.install(ctx.packagesToInstall)
break # while
except Exception, msg:
# Lock the mutex
self.mutex.lock()
# Send error message
data = [EventRetry, str(msg)]
self.queue.put_nowait(data)
# wait for the result
self.wait_condition.wait(self.mutex)
self.mutex.unlock()
if not self.retry_answer:
raise msg
except Exception, msg:
    # QWaitCondition requires a locked mutex, so lock before waiting;
    # a fatal error leaves this process parked here.
    self.mutex.lock()
    data = [EventError, msg]
    self.queue.put_nowait(data)
    # wait for the result
    self.wait_condition.wait(self.mutex)
    self.mutex.unlock()
ctx.logger.debug("Package install finished ...")
# Package Install finished lets configure them
data = [EventPackageInstallFinished]
self.queue.put_nowait(data)
class PkgConfigurator(Process):
def __init__(self, queue, mutex):
Process.__init__(self)
self.queue = queue
self.mutex = mutex
ctx.logger.debug("PkgConfigurator started.")
def run(self):
ctx.logger.debug("PkgConfigurator is running.")
ui = PisiUI(self.queue)
yali.pisiiface.initialize(ui=ui, with_comar=True)
try:
# run all pending...
ctx.logger.debug("exec : yali.pisiiface.configurePending() called")
yali.pisiiface.configurePending()
except Exception, msg:
data = [EventError, msg]
self.queue.put_nowait(data)
# Remove the temporary repository and add the real one
if ctx.flags.collection:
yali.pisiiface.switchToPardusRepo(ctx.consts.collection_repo_name)
else:
yali.pisiiface.switchToPardusRepo(ctx.consts.cd_repo_name)
data = [EventAllFinished]
self.queue.put_nowait(data)
class PisiUI(pisi.ui.UI):
def __init__(self, queue):
pisi.ui.UI.__init__(self)
self.queue = queue
self.last_package = ''
def notify(self, event, **keywords):
if event == pisi.ui.installing:
ctx.logger.debug("PisiUI.notify event: Install")
data = [EventInstall, keywords['package']]
self.last_package = keywords['package'].name
self.queue.put_nowait(data)
elif event == pisi.ui.configuring:
ctx.logger.debug("PisiUI.notify event: Configure")
data = [EventConfigure, keywords['package']]
self.last_package = keywords['package'].name
self.queue.put_nowait(data)
def error(self, msg):
ctx.logger.debug("PisiUI.error: %s" % unicode(msg))
def warning(self, msg):
ctx.logger.debug("PisiUI.warning: %s" % unicode(msg))
|
import re
from urllib.request import urlopen
import znc
class titles(znc.Module):
description = "Read titles"
def OnChanMsg(self, nick, chan, msg):
m = re.search(r"(https?://([-\w\.]+)+(:\d+)?(/([\w/_\.]*(\?\S+)?)?)?)", msg.s)
if m:
url = str(m.group(1))
page = urlopen(url)
c = re.compile("<title>(.*?)</title>")
for line in page:
trial = c.search(str(line))
if trial:
self.PutIRC("PRIVMSG "+chan.GetName()+" :[ Title : "+str(trial.group(1))+" ] [ URL: "+url+" ]")
break
page.close()
return znc.CONTINUE
|
import sys
import os
import re
if sys.version_info[:2] < (3, 3):
raise SystemExit("Python >=3.3 required")
from distutils.core import setup
text = open(os.path.join(os.path.dirname(sys.argv[0]), "libpcron/__init__.py")).read()
match = re.search(r'^__version__ = "([^"]+)"', text, re.M)
version = match.group(1)
kwargs = {
"name": "pcron",
"version": version,
"author": "Lars Gustäbel",
"author_email": "lars@gustaebel.de",
"url": "http://github.com/gustaebel/pcron/",
"description": "a periodic job scheduler",
"long_description":
"pcron is a periodic job scheduler inspired by fcron",
"license": "GPL",
"classifiers": ["Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 3",
"Topic :: Utilities"],
"packages": ["libpcron"],
"scripts": ["pcrond", "pcron", "pcrontab"]
}
setup(**kwargs)
|
import itertools, math
from ..ssa import objtypes
from .stringescape import escapeString
class VariableDeclarator(object):
def __init__(self, typename, identifier): self.typename = typename; self.local = identifier
def print_(self, printer, print_):
return '{} {}'.format(print_(self.typename), print_(self.local))
class JavaStatement(object):
expr = None #provide default for subclasses that don't have an expression
def getScopes(self): return ()
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
self.expr.addParens()
class ExpressionStatement(JavaStatement):
def __init__(self, expr):
self.expr = expr
def print_(self, printer, print_): return print_(self.expr) + ';'
class LocalDeclarationStatement(JavaStatement):
def __init__(self, decl, expr=None):
self.decl = decl
self.expr = expr
def print_(self, printer, print_):
if self.expr is not None:
return '{} = {};'.format(print_(self.decl), print_(self.expr))
return print_(self.decl) + ';'
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
if not isJavaAssignable(env, self.expr.dtype, self.decl.typename.tt):
self.expr = makeCastExpr(self.decl.typename.tt, self.expr, fixEnv=env)
self.expr.addParens()
class ReturnStatement(JavaStatement):
def __init__(self, expr=None, tt=None):
self.expr = expr
self.tt = tt
def print_(self, printer, print_): return 'return {};'.format(print_(self.expr)) if self.expr is not None else 'return;'
def addCastsAndParens(self, env):
if self.expr is not None:
self.expr.addCasts(env)
if not isJavaAssignable(env, self.expr.dtype, self.tt):
self.expr = makeCastExpr(self.tt, self.expr, fixEnv=env)
self.expr.addParens()
class ThrowStatement(JavaStatement):
def __init__(self, expr):
self.expr = expr
def print_(self, printer, print_): return 'throw {};'.format(print_(self.expr))
class JumpStatement(JavaStatement):
def __init__(self, target, isFront):
keyword = 'continue' if isFront else 'break'
label = (' ' + target.getLabel()) if target is not None else ''
self.str = keyword + label + ';'
def print_(self, printer, print_): return self.str
sbcount = itertools.count()
class LazyLabelBase(JavaStatement):
# Jumps are represented by arbitrary 'keys', currently just the key of the
# original proxy node. Each item has a continueKey and a breakKey representing
# the beginning and the point just past the end respectively. breakKey may be
# None if this item appears at the end of the function and there is nothing after it.
# Statement blocks have a jump key representing where it jumps to if any. This
# may be None if the jump is unreachable (such as if there is a throw or return)
def __init__(self, labelfunc, begink, endk):
self.label, self.func = None, labelfunc
self.continueKey = begink
self.breakKey = endk
# self.id = next(sbcount) #For debugging purposes
def getLabel(self):
if self.label is None:
self.label = self.func() #Not a bound function!
return self.label
def getLabelPrefix(self): return '' if self.label is None else self.label + ': '
# def getLabelPrefix(self): return self.getLabel() + ': '
#For debugging
def __str__(self):
if isinstance(self, StatementBlock):
return 'Sb'+str(self.id)
return type(self).__name__[:3]+str(self.id)
__repr__ = __str__
class TryStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, tryb, pairs):
super(TryStatement, self).__init__(labelfunc, begink, endk)
self.tryb, self.pairs = tryb, pairs
def getScopes(self): return (self.tryb,) + zip(*self.pairs)[1]
def print_(self, printer, print_):
tryb = print_(self.tryb)
parts = ['catch({})\n{}'.format(print_(x), print_(y)) for x,y in self.pairs]
return '{}try\n{}\n{}'.format(self.getLabelPrefix(), tryb, '\n'.join(parts))
class IfStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, expr, scopes):
super(IfStatement, self).__init__(labelfunc, begink, endk)
self.expr = expr #don't rename without changing how var replacement works!
self.scopes = scopes
# assert(len(self.scopes) == 1 or len(self.scopes) == 2)
def getScopes(self): return self.scopes
def print_(self, printer, print_):
lbl = self.getLabelPrefix()
parts = [self.expr] + list(self.scopes)
if len(self.scopes) == 1:
parts = [print_(x) for x in parts]
return '{}if({})\n{}'.format(lbl, *parts)
# Special case handling for 'else if'
        sep = '\n' #else separator depends on whether we have an 'else if'
fblock = self.scopes[1]
if len(fblock.statements) == 1:
stmt = fblock.statements[-1]
if isinstance(stmt, IfStatement) and stmt.label is None:
sep, parts[-1] = ' ', stmt
parts = [print_(x) for x in parts]
return '{}if({})\n{}\nelse{sep}{}'.format(lbl, *parts, sep=sep)
class SwitchStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, expr, pairs):
super(SwitchStatement, self).__init__(labelfunc, begink, endk)
self.expr = expr #don't rename without changing how var replacement works!
self.pairs = pairs
def getScopes(self): return zip(*self.pairs)[1]
def hasDefault(self): return None in zip(*self.pairs)[0]
def print_(self, printer, print_):
expr = print_(self.expr)
def printCase(keys):
if keys is None:
return 'default: '
return ''.join(map('case {}: '.format, sorted(keys)))
bodies = [(printCase(keys) + print_(scope)) for keys, scope in self.pairs]
if self.pairs[-1][0] is None and len(self.pairs[-1][1].statements) == 0:
bodies.pop()
contents = '\n'.join(bodies)
indented = [' '+line for line in contents.splitlines()]
lines = ['{'] + indented + ['}']
return '{}switch({}){}'.format(self.getLabelPrefix(), expr, '\n'.join(lines))
class WhileStatement(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, parts):
super(WhileStatement, self).__init__(labelfunc, begink, endk)
self.expr = Literal.TRUE
self.parts = parts
assert(len(self.parts) == 1)
def getScopes(self): return self.parts
def print_(self, printer, print_):
parts = print_(self.expr), print_(self.parts[0])
return '{}while({})\n{}'.format(self.getLabelPrefix(), *parts)
class StatementBlock(LazyLabelBase):
def __init__(self, labelfunc, begink, endk, statements, jumpk, labelable=True):
super(StatementBlock, self).__init__(labelfunc, begink, endk)
self.parent = None #should be assigned later
self.statements = statements
self.jumpKey = jumpk
self.labelable = labelable
def doesFallthrough(self): return self.jumpKey is None or self.jumpKey == self.breakKey
def getScopes(self): return self,
def print_(self, printer, print_):
assert(self.labelable or self.label is None)
contents = '\n'.join(print_(x) for x in self.statements)
indented = [' '+line for line in contents.splitlines()]
# indented[:0] = [' //{}{}'.format(self,x) for x in (self.breakKey, self.continueKey, self.jumpKey)]
lines = [self.getLabelPrefix() + '{'] + indented + ['}']
return '\n'.join(lines)
@staticmethod
def join(*scopes):
blists = [s.bases for s in scopes if s is not None] #allow None to represent the universe (top element)
if not blists:
return None
common = [x for x in zip(*blists) if len(set(x)) == 1]
return common[-1][0]
class StringStatement(JavaStatement):
def __init__(self, s):
self.s = s
def print_(self, printer, print_): return self.s
_assignable_sprims = '.byte','.short','.char'
_assignable_lprims = '.int','.long','.float','.double'
def isObject(tt):
return tt == objtypes.NullTT or tt[1] > 0 or not tt[0][0].startswith('.')
def isPrimativeAssignable(fromt, to):
x, y = fromt[0], to[0]
assert(fromt[1] == to[1] == 0)
if x == y or (x in _assignable_sprims and y in _assignable_lprims):
return True
elif (x in _assignable_lprims and y in _assignable_lprims):
return _assignable_lprims.index(x) <= _assignable_lprims.index(y)
else:
return x == '.byte' and y == '.short'
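# Illustrative checks of the widening rules above (comments only; type tuples
# are (name, dim) with dim == 0, as the assert requires):
#   isPrimativeAssignable(('.byte', 0), ('.int', 0))   -> True  (small prim widens to large)
#   isPrimativeAssignable(('.int', 0), ('.long', 0))   -> True  (index 0 <= index 1 in _assignable_lprims)
#   isPrimativeAssignable(('.long', 0), ('.int', 0))   -> False (narrowing)
#   isPrimativeAssignable(('.byte', 0), ('.short', 0)) -> True  (special byte->short case)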
def isJavaAssignable(env, fromt, to):
if fromt is None or to is None: #this should never happen, except during debugging
return True
if isObject(to):
assert(isObject(fromt))
#todo - make it check interfaces too
return objtypes.isSubtype(env, fromt, to)
else: #allowed if numeric conversion is widening
return isPrimativeAssignable(fromt, to)
_int_tts = objtypes.LongTT, objtypes.IntTT, objtypes.ShortTT, objtypes.CharTT, objtypes.ByteTT
def makeCastExpr(newtt, expr, fixEnv=None):
if newtt == expr.dtype:
return expr
# if casting a literal with compatible type, just create a literal of the new type
if isinstance(expr, Literal):
allowed_conversions = [
(objtypes.FloatTT, objtypes.DoubleTT),
(objtypes.IntTT, objtypes.LongTT),
(objtypes.IntTT, objtypes.BoolTT),
(objtypes.BoolTT, objtypes.IntTT),
]
if (expr.dtype, newtt) in allowed_conversions:
return Literal(newtt, expr.val)
if newtt == objtypes.IntTT and expr.dtype == objtypes.BoolTT:
return Ternary(expr, Literal.ONE, Literal.ZERO)
elif newtt == objtypes.BoolTT and expr.dtype == objtypes.IntTT:
return BinaryInfix('!=', (expr, Literal.ZERO), objtypes.BoolTT)
ret = Cast(TypeName(newtt), expr)
if fixEnv is not None:
ret = ret.fix(fixEnv)
return ret
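# What makeCastExpr produces, derived from the branches above:
#   compatible literal            -> a new Literal of the target type (re-typed in place)
#   non-literal bool expr -> int  -> Ternary: expr ? 1 : 0 (Java has no bool->int cast)
#   non-literal int expr -> bool  -> BinaryInfix: expr != 0
#   anything else                 -> a Cast node, optionally repaired via fix(env)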
class JavaExpression(object):
precedence = 0 #Default precedence
params = () #for subclasses that don't have params
def complexity(self): return 1 + max(e.complexity() for e in self.params) if self.params else 0
def postFlatIter(self):
return itertools.chain([self], *[expr.postFlatIter() for expr in self.params])
def print_(self, printer, print_):
return self.fmt.format(*[print_(expr) for expr in self.params])
def replaceSubExprs(self, rdict):
if self in rdict:
return rdict[self]
self.params = [param.replaceSubExprs(rdict) for param in self.params]
return self
def addCasts(self, env):
for param in self.params:
param.addCasts(env)
self.addCasts_sub(env)
def addCasts_sub(self, env): pass
def addParens(self):
for param in self.params:
param.addParens()
self.params = list(self.params) #make it easy for children to edit
self.addParens_sub()
def addParens_sub(self): pass
def isLocalAssign(self): return isinstance(self, Assignment) and isinstance(self.params[0], Local)
def __repr__(self):
return type(self).__name__.rpartition('.')[-1] + ' ' + print_(self)
__str__ = __repr__
class ArrayAccess(JavaExpression):
def __init__(self, *params):
if params[0].dtype == objtypes.NullTT:
#Unfortunately, Java doesn't really support array access on null constants
#So we'll just cast it to Object[] as a hack
param = makeCastExpr(('java/lang/Object',1), params[0])
params = param, params[1]
self.params = params
self.fmt = '{}[{}]'
@property
def dtype(self):
base, dim = self.params[0].dtype
assert(dim>0)
return base, dim-1
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 0 or isinstance(p0, ArrayCreation):
self.params[0] = Parenthesis(p0)
class ArrayCreation(JavaExpression):
def __init__(self, tt, *sizeargs):
base, dim = tt
self.params = (TypeName((base,0)),) + sizeargs
self.dtype = tt
assert(dim >= len(sizeargs) > 0)
self.fmt = 'new {}' + '[{}]'*len(sizeargs) + '[]'*(dim-len(sizeargs))
class Assignment(JavaExpression):
precedence = 21
def __init__(self, *params):
self.params = params
self.fmt = '{} = {}'
@property
def dtype(self): return self.params[0].dtype
def addCasts_sub(self, env):
left, right = self.params
if not isJavaAssignable(env, right.dtype, left.dtype):
expr = makeCastExpr(left.dtype, right, fixEnv=env)
self.params = left, expr
_binary_ptable = ['* / %', '+ -', '<< >> >>>',
'< > <= >= instanceof', '== !=',
'&', '^', '|', '&&', '||']
binary_precedences = {}
for _ops, _val in zip(_binary_ptable, range(10,20)):
for _op in _ops.split():
binary_precedences[_op] = _val
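# Resulting table (lower value binds tighter): '* / %' -> 10, '+ -' -> 11, ...,
# '&&' -> 18, '||' -> 19; e.g. binary_precedences['+'] == 11, binary_precedences['=='] == 14.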
class BinaryInfix(JavaExpression):
def __init__(self, opstr, params, dtype=None):
assert(len(params) == 2)
self.params = params
self.opstr = opstr
self.fmt = '{{}} {} {{}}'.format(opstr)
self._dtype = dtype
self.precedence = binary_precedences[opstr]
@property
def dtype(self): return self.params[0].dtype if self._dtype is None else self._dtype
def addParens_sub(self):
myprec = self.precedence
associative = myprec >= 15 #for now we treat +, *, etc as nonassociative due to floats
for i, p in enumerate(self.params):
if p.precedence > myprec:
self.params[i] = Parenthesis(p)
elif p.precedence == myprec and i > 0 and not associative:
self.params[i] = Parenthesis(p)
class Cast(JavaExpression):
precedence = 5
def __init__(self, *params):
self.dtype = params[0].tt
self.params = params
self.fmt = '({}){}'
def fix(self, env):
tt, expr = self.dtype, self.params[1]
# "Impossible" casts are a compile error in Java.
# This can be fixed with an intermediate cast to Object
if isObject(tt):
if not isJavaAssignable(env, tt, expr.dtype):
if not isJavaAssignable(env, expr.dtype, tt):
expr = makeCastExpr(objtypes.ObjectTT, expr)
self.params = self.params[0], expr
return self
def addCasts_sub(self, env): self.fix(env)
def addParens_sub(self):
p1 = self.params[1]
if p1.precedence > 5 or (isinstance(p1, UnaryPrefix) and p1.opstr[0] in '-+'):
self.params[1] = Parenthesis(p1)
class ClassInstanceCreation(JavaExpression):
def __init__(self, typename, tts, arguments):
self.typename, self.tts, self.params = typename, tts, arguments
self.dtype = typename.tt
def print_(self, printer, print_):
return 'new {}({})'.format(print_(self.typename), ', '.join(print_(x) for x in self.params))
def addCasts_sub(self, env):
newparams = []
for tt, expr in zip(self.tts, self.params):
if expr.dtype != tt:
expr = makeCastExpr(tt, expr, fixEnv=env)
newparams.append(expr)
self.params = newparams
class FieldAccess(JavaExpression):
def __init__(self, primary, name, dtype, op=None, printLeft=True):
self.dtype = dtype
self.params = [primary]
self.op, self.name = op, name
self.printLeft = printLeft
# self.params, self.name = [primary], escapeString(name)
# self.fmt = ('{}.' if printLeft else '') + self.name
def print_(self, printer, print_):
if self.op is None:
name = self.name
assert(name in ('length','class'))
else:
cls, name, desc = self.op.target, self.op.name, self.op.desc
name = escapeString(printer.fieldName(cls, name, desc))
pre = print_(self.params[0])+'.' if self.printLeft else ''
return pre+name
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 0:
self.params[0] = Parenthesis(p0)
def printFloat(x, isSingle):
#TODO make this less hackish. We only really need the parens if it's preceded by unary minus
#note: NaN may have arbitrary sign
if math.copysign(1.0, x) == -1.0 and not math.isnan(x):
return '(-{})'.format(printFloat(math.copysign(x, 1.0), isSingle))
suffix = 'f' if isSingle else ''
if math.isnan(x):
return '(0.0{0}/0.0{0})'.format(suffix)
elif math.isinf(x):
return '(1.0{0}/0.0{0})'.format(suffix)
if isSingle and x > 0.0:
        #Try to find a more compact representation for floats, since repr treats everything as doubles
m, e = math.frexp(x)
half_ulp2 = math.ldexp(1.0, max(e - 25, -150)) #don't bother doubling when near the upper range of a given e value
half_ulp1 = (half_ulp2/2) if m == 0.5 and e >= -125 else half_ulp2
lbound, ubound = x-half_ulp1, x+half_ulp2
assert(lbound < x < ubound)
s = '{:g}'.format(x).replace('+','')
if lbound < float(s) < ubound: #strict ineq to avoid potential double rounding issues
return s + suffix
return repr(x) + suffix
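# Sample outputs (assuming IEEE-754 doubles, as in CPython):
#   printFloat(1.5, True)           -> '1.5f'
#   printFloat(-1.5, True)          -> '(-1.5f)'   (parens guard against a preceding unary minus)
#   printFloat(float('nan'), False) -> '(0.0/0.0)'
#   printFloat(float('inf'), True)  -> '(1.0f/0.0f)'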
class Literal(JavaExpression):
def __init__(self, vartype, val):
self.dtype = vartype
self.val = val
self.str = None
if vartype == objtypes.StringTT:
self.str = '"' + escapeString(val) + '"'
elif vartype == objtypes.IntTT:
self.str = str(val)
elif vartype == objtypes.LongTT:
self.str = str(val) + 'L'
elif vartype == objtypes.FloatTT or vartype == objtypes.DoubleTT:
assert(type(val) == float)
self.str = printFloat(val, vartype == objtypes.FloatTT)
elif vartype == objtypes.NullTT:
self.str = 'null'
elif vartype == objtypes.ClassTT:
self.params = [TypeName(val)]
self.fmt = '{}.class'
elif vartype == objtypes.BoolTT:
self.str = 'true' if val else 'false'
else:
assert(0)
def print_(self, printer, print_):
if self.str is None:
#for printing class literals
return self.fmt.format(print_(self.params[0]))
return self.str
def _key(self): return self.dtype, self.val
def __eq__(self, other): return type(self) == type(other) and self._key() == other._key()
def __ne__(self, other): return type(self) != type(other) or self._key() != other._key()
def __hash__(self): return hash(self._key())
Literal.FALSE = Literal(objtypes.BoolTT, 0)
Literal.TRUE = Literal(objtypes.BoolTT, 1)
Literal.N_ONE = Literal(objtypes.IntTT, -1)
Literal.ZERO = Literal(objtypes.IntTT, 0)
Literal.ONE = Literal(objtypes.IntTT, 1)
Literal.LZERO = Literal(objtypes.LongTT, 0)
Literal.FZERO = Literal(objtypes.FloatTT, 0.0)
Literal.DZERO = Literal(objtypes.DoubleTT, 0.0)
Literal.NULL = Literal(objtypes.NullTT, None)
class Local(JavaExpression):
def __init__(self, vartype, namefunc):
self.dtype = vartype
self.name = None
self.func = namefunc
def print_(self, printer, print_):
if self.name is None:
self.name = self.func(self)
return self.name
class MethodInvocation(JavaExpression):
def __init__(self, left, name, tts, arguments, op, dtype):
if left is None:
self.params = arguments
else:
self.params = [left] + arguments
self.hasLeft = (left is not None)
self.dtype = dtype
self.name = name
self.tts = tts
self.op = op #keep around for future reference and new merging
def print_(self, printer, print_):
cls, name, desc = self.op.target, self.op.name, self.op.desc
if name != self.name:
assert(name == '<init>')
name = self.name
else:
name = escapeString(printer.methodName(cls, name, desc))
if self.hasLeft:
left, arguments = self.params[0], self.params[1:]
return '{}.{}({})'.format(print_(left), name, ', '.join(print_(x) for x in arguments))
else:
arguments = self.params
return '{}({})'.format(name, ', '.join(print_(x) for x in arguments))
def addCasts_sub(self, env):
newparams = []
for tt, expr in zip(self.tts, self.params):
if expr.dtype != tt:
expr = makeCastExpr(tt, expr, fixEnv=env)
newparams.append(expr)
self.params = newparams
def addParens_sub(self):
if self.hasLeft:
p0 = self.params[0]
if p0.precedence > 0:
self.params[0] = Parenthesis(p0)
class Parenthesis(JavaExpression):
def __init__(self, param):
self.params = param,
self.fmt = '({})'
@property
def dtype(self): return self.params[0].dtype
class Ternary(JavaExpression):
precedence = 20
def __init__(self, *params):
self.params = params
self.fmt = '{} ? {} : {}'
@property
def dtype(self): return self.params[1].dtype
def addParens_sub(self):
        #Add (strictly unnecessary) parentheses to complex conditions for readability
if self.params[0].precedence >= 20 or self.params[0].complexity() > 0:
self.params[0] = Parenthesis(self.params[0])
if self.params[2].precedence > 20:
self.params[2] = Parenthesis(self.params[2])
class TypeName(JavaExpression):
def __init__(self, tt):
self.dtype = None
self.tt = tt
def print_(self, printer, print_):
name, dim = self.tt
        if name[0] == '.': #primitive type
name = name[1:]
else:
name = printer.className(name)
name = escapeString(name.replace('/','.'))
s = name + '[]'*dim
if s.rpartition('.')[0] == 'java.lang':
s = s.rpartition('.')[2]
return s
    def complexity(self): return -1 #exprs which have this as a param won't be bumped up to 1 unnecessarily
class CatchTypeNames(JavaExpression): #Used for caught exceptions, which can have multiple types specified
def __init__(self, env, tts):
assert(tts and not any(zip(*tts)[1])) #at least one type, no array types
self.tnames = map(TypeName, tts)
self.dtype = objtypes.commonSupertype(env, tts)
def print_(self, printer, print_):
return ' | '.join(print_(tn) for tn in self.tnames)
class UnaryPrefix(JavaExpression):
precedence = 5
def __init__(self, opstr, param, dtype=None):
self.params = [param]
self.opstr = opstr
self.fmt = opstr + '{}'
self._dtype = dtype
@property
def dtype(self): return self.params[0].dtype if self._dtype is None else self._dtype
def addParens_sub(self):
p0 = self.params[0]
if p0.precedence > 5 or (isinstance(p0, UnaryPrefix) and p0.opstr[0] == self.opstr[0]):
self.params[0] = Parenthesis(p0)
class Dummy(JavaExpression):
def __init__(self, fmt, params, isNew=False):
self.params = params
self.fmt = fmt
self.isNew = isNew
self.dtype = None
|
from datetime import datetime
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
import listenbrainz_spark.stats
from listenbrainz_spark import utils, stats
from listenbrainz_spark.tests import SparkNewTestCase
from pyspark.sql import Row
class InitTestCase(SparkNewTestCase):
def test_replace_days(self):
self.assertEqual(stats.replace_days(datetime(2019, 5, 12), 13), datetime(2019, 5, 13))
def test_replace_months(self):
self.assertEqual(stats.replace_months(datetime(2020, 5, 18), 6), datetime(2020, 6, 18))
def test_offset_months(self):
d1 = stats.offset_months(datetime(2019, 5, 12), 3, shift_backwards=False)
d2 = datetime(2019, 8, 12)
self.assertEqual(d1, d2)
d1 = stats.offset_months(datetime(2019, 5, 12), 3)
d2 = datetime(2019, 2, 12)
self.assertEqual(d1, d2)
def test_offset_days(self):
d1 = stats.offset_days(datetime(2019, 5, 12), 3, shift_backwards=False)
d2 = datetime(2019, 5, 15)
self.assertEqual(d1, d2)
d1 = stats.offset_days(datetime(2019, 5, 12), 3)
d2 = datetime(2019, 5, 9)
self.assertEqual(d1, d2)
def test_run_query(self):
df = utils.create_dataframe([Row(column1=1, column2=2)], schema=None)
utils.register_dataframe(df, "table")
new_df = stats.run_query("SELECT * FROM table")
self.assertEqual(new_df.count(), df.count())
def test_get_day_end(self):
day = datetime(2020, 6, 19)
self.assertEqual(datetime(2020, 6, 19, 23, 59, 59, 999999), stats.get_day_end(day))
def test_get_month_end(self):
month = datetime(2020, 6, 1)
self.assertEqual(datetime(2020, 6, 30, 23, 59, 59, 999999), stats.get_month_end(month))
def test_get_year_end(self):
self.assertEqual(datetime(2020, 12, 31, 23, 59, 59, 999999), stats.get_year_end(datetime(2020, 1, 1)))
def test_get_last_monday(self):
date = datetime(2020, 5, 19)
self.assertEqual(datetime(2020, 5, 18), listenbrainz_spark.stats.get_last_monday(date))
@patch("listenbrainz_spark.stats.get_latest_listen_ts")
def test_get_dates_for_stats_range(self, mock_get_latest_listen_ts):
quarters = [
datetime(2021, 1, 1),
datetime(2021, 4, 1),
datetime(2021, 7, 1),
datetime(2021, 10, 1),
datetime(2022, 1, 1)
]
mock_get_latest_listen_ts.return_value = datetime(2021, 4, 5, 2, 3, 0)
self.assertEqual((quarters[0], quarters[1]), stats.get_dates_for_stats_range("quarter"))
mock_get_latest_listen_ts.return_value = datetime(2021, 8, 7, 2, 3, 0)
self.assertEqual((quarters[1], quarters[2]), stats.get_dates_for_stats_range("quarter"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 9, 2, 3, 0)
self.assertEqual((quarters[2], quarters[3]), stats.get_dates_for_stats_range("quarter"))
mock_get_latest_listen_ts.return_value = datetime(2022, 1, 8, 2, 3, 0)
self.assertEqual((quarters[3], quarters[4]), stats.get_dates_for_stats_range("quarter"))
periods = [
datetime(2020, 7, 1),
datetime(2021, 1, 1),
datetime(2021, 7, 1)
]
mock_get_latest_listen_ts.return_value = datetime(2021, 1, 9, 2, 3, 0)
self.assertEqual((periods[0], periods[1]), stats.get_dates_for_stats_range("half_yearly"))
mock_get_latest_listen_ts.return_value = datetime(2021, 8, 8, 2, 3, 0)
self.assertEqual((periods[1], periods[2]), stats.get_dates_for_stats_range("half_yearly"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 24, 2, 3, 0)
self.assertEqual((datetime(2021, 11, 22), datetime(2021, 11, 24)), stats.get_dates_for_stats_range("this_week"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 22, 3, 0, 0)
self.assertEqual((datetime(2021, 11, 15), datetime(2021, 11, 22)), stats.get_dates_for_stats_range("this_week"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 24, 2, 3, 0)
self.assertEqual((datetime(2021, 11, 15), datetime(2021, 11, 22)), stats.get_dates_for_stats_range("week"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 22, 3, 0, 0)
self.assertEqual((datetime(2021, 11, 15), datetime(2021, 11, 22)), stats.get_dates_for_stats_range("week"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 21, 2, 3, 0)
self.assertEqual((datetime(2021, 11, 1), datetime(2021, 11, 21)), stats.get_dates_for_stats_range("this_month"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 1, 3, 0, 0)
self.assertEqual((datetime(2021, 10, 1), datetime(2021, 11, 1)), stats.get_dates_for_stats_range("this_month"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 21, 2, 3, 0)
self.assertEqual((datetime(2021, 10, 1), datetime(2021, 11, 1)), stats.get_dates_for_stats_range("month"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 1, 3, 0, 0)
self.assertEqual((datetime(2021, 10, 1), datetime(2021, 11, 1)), stats.get_dates_for_stats_range("month"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 21, 2, 3, 0)
self.assertEqual((datetime(2020, 1, 1), datetime(2021, 1, 1)), stats.get_dates_for_stats_range("year"))
mock_get_latest_listen_ts.return_value = datetime(2021, 1, 1, 2, 3, 0)
self.assertEqual((datetime(2020, 1, 1), datetime(2021, 1, 1)), stats.get_dates_for_stats_range("year"))
mock_get_latest_listen_ts.return_value = datetime(2021, 1, 1, 2, 1, 0)
self.assertEqual((datetime(2020, 1, 1), datetime(2021, 1, 1)), stats.get_dates_for_stats_range("this_year"))
mock_get_latest_listen_ts.return_value = datetime(2021, 11, 1, 3, 0, 0)
self.assertEqual((datetime(2021, 1, 1), datetime(2021, 11, 1)), stats.get_dates_for_stats_range("this_year"))
|
from PyQt4.QtGui import QComboBox
class StringComboBox(QComboBox):
def __init__(self, parent):
QComboBox.__init__(self, parent)
    def addPathAndSelect(self, path):
        """Add path if not already present and select it; return True if it was added."""
        for i in range(self.count()):
            if self.itemText(i) == path:
                self.setCurrentIndex(i)
                return False
        self.addItem(path)
        self.setCurrentIndex(self.count() - 1)
        return True
    def addPath(self, path):
        """Add path if not already present; return True if it was added."""
        for i in range(self.count()):
            if self.itemText(i) == path:
                return False
        self.addItem(path)
        return True
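# Usage sketch (assumes a QApplication exists, as PyQt4 widgets require;
# values are illustrative):
#   box = StringComboBox(parent=None)
#   box.addPath("/home/user")          # True  - new entry appended
#   box.addPath("/home/user")          # False - duplicate, nothing added
#   box.addPathAndSelect("/tmp")       # True  - appended and made current
#   box.addPathAndSelect("/home/user") # False - existing entry just selected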
|