repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
vitmod/enigma2-1 | lib/python/Components/config.py | 1 | 53976 | from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
class ConfigElement(object):
def __init__(self):
self.extra_args = {}
self.saved_value = None
self.save_forced = False
self.last_value = None
self.save_disabled = False
self.__notifiers = None
self.__notifiers_final = None
self.enabled = True
self.callNotifiersOnSaveAndCancel = False
def getNotifiers(self):
if self.__notifiers is None:
self.__notifiers = [ ]
return self.__notifiers
def setNotifiers(self, val):
self.__notifiers = val
notifiers = property(getNotifiers, setNotifiers)
def getNotifiersFinal(self):
if self.__notifiers_final is None:
self.__notifiers_final = [ ]
return self.__notifiers_final
def setNotifiersFinal(self, val):
self.__notifiers_final = val
notifiers_final = property(getNotifiersFinal, setNotifiersFinal)
# you need to override this to do input validation
def setValue(self, value):
self._value = value
self.changed()
def getValue(self):
return self._value
value = property(getValue, setValue)
# you need to override this if self.value is not a string
def fromstring(self, value):
return value
	# you can override this for fancy default handling
def load(self):
sv = self.saved_value
if sv is None:
self.value = self.default
else:
self.value = self.fromstring(sv)
def tostring(self, value):
return str(value)
# you need to override this if str(self.value) doesn't work
def save(self):
if self.save_disabled or (self.value == self.default and not self.save_forced):
self.saved_value = None
else:
self.saved_value = self.tostring(self.value)
if self.callNotifiersOnSaveAndCancel:
self.changed()
def cancel(self):
self.load()
if self.callNotifiersOnSaveAndCancel:
self.changed()
def isChanged(self):
sv = self.saved_value
if sv is None and self.value == self.default:
return False
return self.tostring(self.value) != sv
def changed(self):
if self.__notifiers:
for x in self.notifiers:
				extra_args = self.extra_args.get(x)
				if extra_args:
					x(self, extra_args)
				else:
					x(self)
def changedFinal(self):
if self.__notifiers_final:
for x in self.notifiers_final:
				extra_args = self.extra_args.get(x)
				if extra_args:
					x(self, extra_args)
				else:
					x(self)
def addNotifier(self, notifier, initial_call = True, immediate_feedback = True, extra_args=None):
if not extra_args: extra_args = []
assert callable(notifier), "notifiers must be callable"
try:
self.extra_args[notifier] = extra_args
except: pass
if immediate_feedback:
self.notifiers.append(notifier)
else:
self.notifiers_final.append(notifier)
# CHECKME:
# do we want to call the notifier
# - at all when adding it? (yes, though optional)
# - when the default is active? (yes)
# - when no value *yet* has been set,
# because no config has ever been read (currently yes)
# (though that's not so easy to detect.
# the entry could just be new.)
if initial_call:
if extra_args:
notifier(self,extra_args)
else:
notifier(self)
def removeNotifier(self, notifier):
		if notifier in self.notifiers:
			self.notifiers.remove(notifier)
		if notifier in self.notifiers_final:
			self.notifiers_final.remove(notifier)
def disableSave(self):
self.save_disabled = True
def __call__(self, selected):
return self.getMulti(selected)
def onSelect(self, session):
pass
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
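# Example (hedged sketch): attaching a notifier to any ConfigElement
# subclass. The element name below is illustrative, not a real setting.
#
#def myCallback(configElement):
#	print "new value:", configElement.value
#
#config.misc.something.addNotifier(myCallback, initial_call = False)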
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7
KEY_ASCII = 8
KEY_TIMEOUT = 9
KEY_NUMBERS = range(12, 12+10)
KEY_0 = 12
KEY_9 = 12+9
def getKeyNumber(key):
assert key in KEY_NUMBERS
return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
LIST_TYPE_LIST = 1
LIST_TYPE_DICT = 2
def __init__(self, choices, type = None):
self.choices = choices
if type is None:
if isinstance(choices, list):
self.type = choicesList.LIST_TYPE_LIST
elif isinstance(choices, dict):
self.type = choicesList.LIST_TYPE_DICT
else:
assert False, "choices must be dict or list!"
else:
self.type = type
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
			ret = [x[0] if isinstance(x, tuple) else x for x in self.choices]
else:
ret = self.choices.keys()
return ret or [""]
def __iter__(self):
if self.type == choicesList.LIST_TYPE_LIST:
			ret = [x[0] if isinstance(x, tuple) else x for x in self.choices]
else:
ret = self.choices
return iter(ret or [""])
def __len__(self):
return len(self.choices) or 1
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
ret = self.choices[index]
if isinstance(ret, tuple):
ret = ret[0]
return ret
return self.choices.keys()[index]
def index(self, value):
try:
return self.__list__().index(value)
except (ValueError, IndexError):
# occurs e.g. when default is not in list
return 0
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
orig = self.choices[index]
if isinstance(orig, tuple):
self.choices[index] = (value, orig[1])
else:
self.choices[index] = value
else:
key = self.choices.keys()[index]
orig = self.choices[key]
del self.choices[key]
self.choices[value] = orig
def default(self):
choices = self.choices
if not choices:
return ""
if self.type is choicesList.LIST_TYPE_LIST:
default = choices[0]
if isinstance(default, tuple):
default = default[0]
else:
default = choices.keys()[0]
return default
class descriptionList(choicesList): # XXX: we might want a better name for this
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
			ret = [x[1] if isinstance(x, tuple) else x for x in self.choices]
else:
ret = self.choices.values()
return ret or [""]
def __iter__(self):
return iter(self.__list__())
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
for x in self.choices:
if isinstance(x, tuple):
if x[0] == index:
return str(x[1])
elif x == index:
return str(x)
return str(index) # Fallback!
else:
return str(self.choices.get(index, ""))
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
i = self.index(index)
orig = self.choices[i]
if isinstance(orig, tuple):
self.choices[i] = (orig[0], value)
else:
self.choices[i] = value
else:
self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just only the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
class ConfigSelection(ConfigElement):
def __init__(self, choices, default = None):
ConfigElement.__init__(self)
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self._descr = None
self.default = self._value = self.last_value = default
def setChoices(self, choices, default = None):
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self.default = default
if self.value not in self.choices:
self.value = default
def setValue(self, value):
if value in self.choices:
self._value = value
else:
self._value = self.default
self._descr = None
self.changed()
def tostring(self, val):
return val
def getValue(self):
return self._value
def setCurrentText(self, text):
i = self.choices.index(self.value)
self.choices[i] = text
self._descr = self.description[text] = text
self._value = text
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
# GUI
def handleKey(self, key):
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(self.value)
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
def selectNext(self):
nchoices = len(self.choices)
i = self.choices.index(self.value)
self.value = self.choices[(i + 1) % nchoices]
def getText(self):
if self._descr is not None:
return self._descr
descr = self._descr = self.description[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
if self._descr is not None:
descr = self._descr
else:
descr = self._descr = self.description[self.value]
if descr:
return "text", _(descr)
return "text", descr
# HTML
def getHTML(self, id):
res = ""
for v in self.choices:
descr = self.description[v]
if self.value == v:
checked = 'checked="checked" '
else:
checked = ''
			res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "</input><br>\n"
return res
def unsafeAssign(self, value):
# setValue does check if value is in choices. This is safe enough.
self.value = value
description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
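# Example (illustrative names): ids are plain strings; (id, description)
# tuples let the GUI show a translated description while the id is stored.
#
#config.osd.language = ConfigSelection(default = "en", choices = [
#	("en", _("English")),
#	("de", _("German"))])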
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
boolean_descriptions = {False: _("false"), True: _("true")}
class ConfigBoolean(ConfigElement):
def __init__(self, default = False, descriptions = boolean_descriptions):
ConfigElement.__init__(self)
self.descriptions = descriptions
self.value = self.last_value = self.default = default
def handleKey(self, key):
if key in (KEY_LEFT, KEY_RIGHT):
self.value = not self.value
elif key == KEY_HOME:
self.value = False
elif key == KEY_END:
self.value = True
def getText(self):
descr = self.descriptions[self.value]
if descr:
return _(descr)
return descr
def getMulti(self, selected):
descr = self.descriptions[self.value]
if descr:
return "text", _(descr)
return "text", descr
def tostring(self, value):
if not value:
return "false"
else:
return "true"
def fromstring(self, val):
if val == "true":
return True
else:
return False
def getHTML(self, id):
if self.value:
checked = ' checked="checked"'
else:
checked = ''
return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
	# this is FLAWED and must be fixed.
def unsafeAssign(self, value):
if value == "1":
self.value = True
else:
self.value = False
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
yes_no_descriptions = {False: _("no"), True: _("yes")}
class ConfigYesNo(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = yes_no_descriptions)
on_off_descriptions = {False: _("off"), True: _("on")}
class ConfigOnOff(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = on_off_descriptions)
enable_disable_descriptions = {False: _("disable"), True: _("enable")}
class ConfigEnableDisable(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = enable_disable_descriptions)
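# Example (illustrative): the boolean variants differ only in their
# on-screen descriptions; all of them store "true"/"false" in the file.
#
#config.usage.show_infobar = ConfigYesNo(default = True)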
class ConfigDateTime(ConfigElement):
def __init__(self, default, formatstring, increment = 86400):
ConfigElement.__init__(self)
self.increment = increment
self.formatstring = formatstring
self.value = self.last_value = self.default = int(default)
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME or key == KEY_END:
self.value = self.default
def getText(self):
return strftime(self.formatstring, localtime(self.value))
def getMulti(self, selected):
return "text", strftime(self.formatstring, localtime(self.value))
def fromstring(self, val):
return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helpers exist to ease this up a bit.
#
class ConfigSequence(ConfigElement):
def __init__(self, seperator, limits, default, censor_char = ""):
ConfigElement.__init__(self)
assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
#assert isinstance(default, list), "default must be a list"
#assert isinstance(default[0], int), "list must contain numbers"
#assert len(default) == len(limits), "length must match"
self.marked_pos = 0
self.seperator = seperator
self.limits = limits
self.censor_char = censor_char
self.last_value = self.default = default
self.value = copy_copy(default)
self.endNotifier = None
def validate(self):
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
if self._value[num] < self.limits[num][0]:
self._value[num] = self.limits[num][0]
if self._value[num] > self.limits[num][1]:
self._value[num] = self.limits[num][1]
num += 1
if self.marked_pos >= max_pos:
if self.endNotifier:
for x in self.endNotifier:
x(self)
self.marked_pos = max_pos - 1
if self.marked_pos < 0:
self.marked_pos = 0
def validatePos(self):
if self.marked_pos < 0:
self.marked_pos = 0
total_len = sum([len(str(x[1])) for x in self.limits])
if self.marked_pos >= total_len:
self.marked_pos = total_len - 1
def addEndNotifier(self, notifier):
if self.endNotifier is None:
self.endNotifier = []
self.endNotifier.append(notifier)
def handleKey(self, key):
if key == KEY_LEFT:
self.marked_pos -= 1
self.validatePos()
elif key == KEY_RIGHT:
self.marked_pos += 1
self.validatePos()
elif key == KEY_HOME:
self.marked_pos = 0
self.validatePos()
elif key == KEY_END:
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
num += 1
self.marked_pos = max_pos - 1
self.validatePos()
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
block_len = [len(str(x[1])) for x in self.limits]
total_len = sum(block_len)
pos = 0
blocknumber = 0
block_len_total = [0, ]
for x in block_len:
pos += block_len[blocknumber]
block_len_total.append(pos)
if pos - 1 >= self.marked_pos:
pass
else:
blocknumber += 1
# length of numberblock
number_len = len(str(self.limits[blocknumber][1]))
# position in the block
posinblock = self.marked_pos - block_len_total[blocknumber]
oldvalue = self._value[blocknumber]
olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
self._value[blocknumber] = newvalue
self.marked_pos += 1
self.validate()
self.changed()
def genText(self):
value = ""
mPos = self.marked_pos
num = 0
for i in self._value:
			if value: # FIXME: no leading separator possible
value += self.seperator
if mPos >= len(value) - 1:
mPos += 1
if self.censor_char == "":
value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
else:
value += (self.censor_char * len(str(self.limits[num][1])))
num += 1
return value, mPos
def getText(self):
(value, mPos) = self.genText()
return value
def getMulti(self, selected):
(value, mPos) = self.genText()
# only mark cursor when we are selected
# (this code is heavily ink optimized!)
if self.enabled:
return "mtext"[1-selected:], value, [mPos]
else:
return "text", value
def tostring(self, val):
return self.seperator.join([self.saveSingle(x) for x in val])
def saveSingle(self, v):
return str(v)
def fromstring(self, value):
return [int(x) for x in value.split(self.seperator)]
def onDeselect(self, session):
if self.last_value != self._value:
self.changedFinal()
self.last_value = copy_copy(self._value)
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
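# Example (illustrative): a ConfigIP stores the four octets as a list of
# ints; auto_jump advances to the next block automatically while typing.
#
#config.network.gateway = ConfigIP(default = [192,168,0,1], auto_jump = True)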
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
class ConfigMAC(ConfigSequence):
def __init__(self, default):
ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
class ConfigMacText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = 17
self.visible_width = visible_width
self.offset = 0
		self.overwrite = True
self.help_window = None
self.value = self.last_value = self.default = default
self.useableChars = '0123456789ABCDEF'
def validateMarker(self):
textlen = len(self.text)
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
elif self.marked_pos < 0:
self.marked_pos = 0
def insertChar(self, ch, pos, owr):
if self.text[pos] == ':':
pos += 1
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def handleKey(self, key):
if key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
if self.text[self.marked_pos-1] == ':':
self.marked_pos -= 2
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
if self.marked_pos < (len(self.text)-1):
if self.text[self.marked_pos+1] == ':':
self.marked_pos += 2
else:
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
if self.text[self.marked_pos] == ':':
self.marked_pos += 1
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPosition(ConfigSequence):
def __init__(self, default, args):
ConfigSequence.__init__(self, seperator = ",", limits = [(0,args[0]),(0,args[1]),(0,args[2]),(0,args[3])], default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
def __init__(self, default):
t = localtime(default)
ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
def increment(self):
# Check if Minutes maxed out
if self._value[1] == 59:
# Increment Hour, reset Minutes
if self._value[0] < 23:
self._value[0] += 1
else:
self._value[0] = 0
self._value[1] = 0
else:
# Increment Minutes
self._value[1] += 1
# Trigger change
self.changed()
def decrement(self):
# Check if Minutes is minimum
if self._value[1] == 0:
# Decrement Hour, set Minutes to 59
if self._value[0] > 0:
self._value[0] -= 1
else:
self._value[0] = 23
self._value[1] = 59
else:
# Decrement Minutes
self._value[1] -= 1
# Trigger change
self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
def __init__(self, default, limits = integer_limits):
ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
# you need to override this to do input validation
def setValue(self, value):
self._value = [value]
self.changed()
def getValue(self):
return self._value[0]
value = property(getValue, setValue)
def fromstring(self, value):
return int(value)
def tostring(self, value):
return str(value)
class ConfigPIN(ConfigInteger):
def __init__(self, default, len = 4, censor = ""):
assert isinstance(default, int), "ConfigPIN default must be an integer"
ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
self.len = len
def getLength(self):
return self.len
class ConfigFloat(ConfigSequence):
def __init__(self, default, limits):
ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
def getFloat(self):
return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
float = property(getFloat)
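# Worked example (illustrative): with limits = [(0,9),(0,9)] the second
# block counts tenths, so the value [3,5] yields 5 / (9 + 1) + 3 = 3.5:
#
#myfloat = ConfigFloat(default = [3,5], limits = [(0,9),(0,9)])
#print myfloat.float # 3.5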
# an editable text...
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
				self.offset = max(0, textlen - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
		# this will not change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
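# Example (illustrative): a free-form text value; fixed_size = False
# allows the text to change its length while editing.
#
#config.network.hostname = ConfigText(default = "dreambox", fixed_size = False)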
class ConfigPassword(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
self.censor_char = censor
self.hidden = True
def getMulti(self, selected):
mtext, text, mark = ConfigText.getMulti(self, selected)
if self.hidden:
text = len(text) * self.censor_char
return mtext, text, mark
def onSelect(self, session):
ConfigText.onSelect(self, session)
self.hidden = False
def onDeselect(self, session):
ConfigText.onDeselect(self, session)
self.hidden = True
# lets the user select between [min, min+stepwidth, min+(stepwidth*2), ..., maxval],
# where maxval <= max depends on the stepwidth.
# min, max, stepwidth and default are int values.
# wraparound: if True, pressing RIGHT at the maximum value wraps around to the
# minimum value, and vice versa.
class ConfigSelectionNumber(ConfigSelection):
def __init__(self, min, max, stepwidth, default = None, wraparound = False):
self.wraparound = wraparound
if default is None:
default = min
default = str(default)
choices = []
step = min
while step <= max:
choices.append(str(step))
step += stepwidth
ConfigSelection.__init__(self, choices, default)
def getValue(self):
return int(ConfigSelection.getValue(self))
def setValue(self, val):
ConfigSelection.setValue(self, str(val))
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
def handleKey(self, key):
if not self.wraparound:
if key == KEY_RIGHT:
if len(self.choices) == (self.choices.index(str(self.value)) + 1):
return
if key == KEY_LEFT:
if self.choices.index(str(self.value)) == 0:
return
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(str(self.value))
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
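# Example (illustrative): behaves like a ConfigSelection over the strings
# "0", "25", "50", "75", "100", but .value gets and sets plain ints.
#
#config.av.volume_step = ConfigSelectionNumber(min = 0, max = 100, stepwidth = 25, default = 50)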
class ConfigNumber(ConfigText):
def __init__(self, default = 0):
ConfigText.__init__(self, str(default), fixed_size = False)
def getValue(self):
return int(self.text)
def setValue(self, val):
self.text = str(val)
value = property(getValue, setValue)
_value = property(getValue, setValue)
def isChanged(self):
sv = self.saved_value
strv = self.tostring(self.value)
if sv is None and strv == self.default:
return False
return strv != sv
def conform(self):
pos = len(self.text) - self.marked_pos
self.text = self.text.lstrip("0")
if self.text == "":
self.text = "0"
if pos > len(self.text):
self.marked_pos = 0
else:
self.marked_pos = len(self.text) - pos
def handleKey(self, key):
if key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
ascii = getPrevAsciiCode()
if not (48 <= ascii <= 57):
return
else:
ascii = getKeyNumber(key) + 48
newChar = unichr(ascii)
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
else:
ConfigText.handleKey(self, key)
self.conform()
def onSelect(self, session):
self.allmarked = (self.value != "")
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
class ConfigSearchText(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
def __init__(self, default="", visible_width=60):
ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
def getValue(self):
if self.text == "":
return None
else:
return ConfigText.getValue(self)
def setValue(self, val):
if val is None:
val = ""
ConfigText.setValue(self, val)
def getMulti(self, selected):
if self.text == "":
return "mtext"[1-selected:], _("List of storage devices"), range(0)
else:
return ConfigText.getMulti(self, selected)
def onSelect(self, session):
self.allmarked = (self.value != "")
# a slider.
class ConfigSlider(ConfigElement):
def __init__(self, default = 0, increment = 1, limits = (0, 100)):
ConfigElement.__init__(self)
self.value = self.last_value = self.default = default
self.min = limits[0]
self.max = limits[1]
self.increment = increment
def checkValues(self):
if self.value < self.min:
self.value = self.min
if self.value > self.max:
self.value = self.max
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME:
self.value = self.min
elif key == KEY_END:
self.value = self.max
else:
return
self.checkValues()
def getText(self):
return "%d / %d" % (self.value, self.max)
def getMulti(self, selected):
self.checkValues()
return "slider", self.value, self.max
def fromstring(self, value):
return int(value)
# a satlist. in fact, it's a ConfigSelection.
class ConfigSatlist(ConfigSelection):
def __init__(self, list, default = None):
if default is not None:
default = str(default)
ConfigSelection.__init__(self, choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list], default = default)
def getOrbitalPosition(self):
if self.value == "":
return None
return int(self.value)
orbital_position = property(getOrbitalPosition)
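# Example (illustrative data): each list entry is an
# (orbital position, description, flags) tuple.
#
#config.tuner.sat = ConfigSatlist(list = [(192, "Astra 19.2E", 1)], default = 192)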
class ConfigSet(ConfigElement):
def __init__(self, choices, default=None):
if not default: default = []
ConfigElement.__init__(self)
if isinstance(choices, list):
choices.sort()
self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
else:
assert False, "ConfigSet choices must be a list!"
self.pos = -1
default.sort()
self.last_value = self.default = default
self.value = default[:]
def toggleChoice(self, choice):
value = self.value
if choice in value:
value.remove(choice)
else:
value.append(choice)
value.sort()
self.changed()
def handleKey(self, key):
if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
if self.pos != -1:
self.toggleChoice(self.choices[self.pos])
elif key == KEY_LEFT:
if self.pos < 0:
self.pos = len(self.choices)-1
else:
self.pos -= 1
elif key == KEY_RIGHT:
if self.pos >= len(self.choices)-1:
self.pos = -1
else:
self.pos += 1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def genString(self, lst):
res = ""
for x in lst:
res += self.description[x]+" "
return res
def getText(self):
return self.genString(self.value)
def getMulti(self, selected):
if not selected or self.pos == -1:
return "text", self.genString(self.value)
else:
tmp = self.value[:]
ch = self.choices[self.pos]
mem = ch in self.value
if not mem:
tmp.append(ch)
tmp.sort()
ind = tmp.index(ch)
val1 = self.genString(tmp[:ind])
val2 = " "+self.genString(tmp[ind+1:])
if mem:
chstr = " "+self.description[ch]+" "
else:
chstr = "("+self.description[ch]+")"
len_val1 = len(val1)
return "mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr))
def onDeselect(self, session):
self.pos = -1
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value[:]
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
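# Example (illustrative): a multi-selection; .value is the sorted list of
# the ids that are currently enabled.
#
#config.record.days = ConfigSet(choices = [(0, _("Mon")), (1, _("Tue")), (2, _("Wed"))], default = [0, 2])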
class ConfigLocations(ConfigElement):
def __init__(self, default=None, visible_width=False):
if not default: default = []
ConfigElement.__init__(self)
self.visible_width = visible_width
self.pos = -1
self.default = default
self.locations = []
self.mountpoints = []
self.value = default[:]
def setValue(self, value):
locations = self.locations
loc = [x[0] for x in locations if x[3]]
add = [x for x in value if not x in loc]
diff = add + [x for x in loc if not x in value]
locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
#locations.sort(key = lambda x: x[0])
self.locations = locations
self.changed()
def getValue(self):
self.checkChangedMountpoints()
locations = self.locations
for x in locations:
x[3] = x[2]
return [x[0] for x in locations if x[3]]
value = property(getValue, setValue)
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
def load(self):
sv = self.saved_value
if sv is None:
tmp = self.default
else:
tmp = self.fromstring(sv)
locations = [[x, None, False, False] for x in tmp]
self.refreshMountpoints()
for x in locations:
if fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
self.locations = locations
def save(self):
locations = self.locations
if self.save_disabled or not locations:
self.saved_value = None
else:
self.saved_value = self.tostring([x[0] for x in locations])
def isChanged(self):
sv = self.saved_value
locations = self.locations
		if sv is None and not locations:
return False
return self.tostring([x[0] for x in locations]) != sv
def addedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = True
elif x[1] is None and fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
def removedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = False
def refreshMountpoints(self):
self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
self.mountpoints.sort(key = lambda x: -len(x))
def checkChangedMountpoints(self):
oldmounts = self.mountpoints
self.refreshMountpoints()
newmounts = self.mountpoints
if oldmounts == newmounts:
return
for x in oldmounts:
if not x in newmounts:
self.removedMount(x)
for x in newmounts:
if not x in oldmounts:
self.addedMount(x)
def getMountpoint(self, file):
file = os_path.realpath(file)+"/"
for m in self.mountpoints:
if file.startswith(m):
return m
return None
def handleKey(self, key):
if key == KEY_LEFT:
self.pos -= 1
if self.pos < -1:
self.pos = len(self.value)-1
elif key == KEY_RIGHT:
self.pos += 1
if self.pos >= len(self.value):
self.pos = -1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def getText(self):
return " ".join(self.value)
def getMulti(self, selected):
if not selected:
valstr = " ".join(self.value)
if self.visible_width and len(valstr) > self.visible_width:
return "text", valstr[0:self.visible_width]
else:
return "text", valstr
else:
i = 0
valstr = ""
ind1 = 0
ind2 = 0
for val in self.value:
if i == self.pos:
ind1 = len(valstr)
valstr += str(val)+" "
if i == self.pos:
ind2 = len(valstr)
i += 1
if self.visible_width and len(valstr) > self.visible_width:
if ind1+1 < self.visible_width/2:
off = 0
else:
off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
return "mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off)
else:
return "mtext", valstr, range(ind1,ind2)
def onDeselect(self, session):
self.pos = -1
# nothing.
class ConfigNothing(ConfigSelection):
def __init__(self):
ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
pass
# we store a backup of the loaded configuration
# data in self.stored_values, to be able to deploy
# them when a new config element will be added,
# so non-default values are instantly available
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
def __init__(self):
list.__init__(self)
self.stored_values = {}
def save(self):
for x in self:
x.save()
def load(self):
for x in self:
x.load()
def getSavedValue(self):
res = { }
for i, val in enumerate(self):
sv = val.saved_value
if sv is not None:
res[str(i)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.stored_values.items():
if int(key) < len(self):
self[int(key)].saved_value = val
saved_value = property(getSavedValue, setSavedValue)
def append(self, item):
i = str(len(self))
list.append(self, item)
if i in self.stored_values:
item.saved_value = self.stored_values[i]
item.load()
def dict(self):
return dict([(str(index), value) for index, value in enumerate(self)])
# same as ConfigSubList, just as a dictionary.
# care must be taken that the 'key' has a proper
# str() method, because it will be used in the config
# file.
class ConfigSubDict(dict, object):
def __init__(self):
dict.__init__(self)
self.stored_values = {}
def save(self):
for x in self.values():
x.save()
def load(self):
for x in self.values():
x.load()
def getSavedValue(self):
res = {}
for (key, val) in self.items():
sv = val.saved_value
if sv is not None:
res[str(key)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.items():
if str(key) in self.stored_values:
val.saved_value = self.stored_values[str(key)]
saved_value = property(getSavedValue, setSavedValue)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if str(key) in self.stored_values:
item.saved_value = self.stored_values[str(key)]
item.load()
def dict(self):
return self
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to a usual existing class and you will.
class ConfigSubsection(object):
def __init__(self):
self.__dict__["content"] = ConfigSubsectionContent()
self.content.items = { }
self.content.stored_values = { }
def __setattr__(self, name, value):
if name == "saved_value":
return self.setSavedValue(value)
assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
content = self.content
content.items[name] = value
x = content.stored_values.get(name, None)
if x is not None:
#print "ok, now we have a new item,", name, "and have the following value for it:", x
value.saved_value = x
value.load()
def __getattr__(self, name):
return self.content.items[name]
def getSavedValue(self):
res = self.content.stored_values
for (key, val) in self.content.items.items():
sv = val.saved_value
if sv is not None:
res[key] = sv
elif key in res:
del res[key]
return res
def setSavedValue(self, values):
values = dict(values)
self.content.stored_values = values
for (key, val) in self.content.items.items():
value = values.get(key, None)
if value is not None:
val.saved_value = value
saved_value = property(getSavedValue, setSavedValue)
def save(self):
for x in self.content.items.values():
x.save()
def load(self):
for x in self.content.items.values():
x.load()
def dict(self):
return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
def __init__(self):
ConfigSubsection.__init__(self)
def pickle_this(self, prefix, topickle, result):
for (key, val) in topickle.items():
name = '.'.join((prefix, key))
if isinstance(val, dict):
self.pickle_this(name, val, result)
elif isinstance(val, tuple):
result += [name, '=', str(val[0]), '\n']
else:
result += [name, '=', str(val), '\n']
def pickle(self):
result = []
self.pickle_this("config", self.saved_value, result)
return ''.join(result)
def unpickle(self, lines, base_file=True):
tree = { }
configbase = tree.setdefault("config", {})
for l in lines:
if not l or l[0] == '#':
continue
result = l.split('=', 1)
if len(result) != 2:
continue
(name, val) = result
val = val.strip()
names = name.split('.')
base = configbase
for n in names[1:-1]:
base = base.setdefault(n, {})
base[names[-1]] = val
if not base_file: # not the initial config file..
			# update config.x.y.value when it exists
try:
configEntry = eval(name)
if configEntry is not None:
configEntry.value = val
except (SyntaxError, KeyError):
pass
# we inherit from ConfigSubsection, so ...
#object.__setattr__(self, "saved_value", tree["config"])
if "config" in tree:
self.setSavedValue(tree["config"])
def saveToFile(self, filename):
text = self.pickle()
try:
import os
f = open(filename + ".writing", "w")
f.write(text)
f.flush()
os.fsync(f.fileno())
f.close()
os.rename(filename + ".writing", filename)
except IOError:
print "Config: Couldn't write %s" % filename
def loadFromFile(self, filename, base_file=True):
self.unpickle(open(filename, "r"), base_file)
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
def __init__(self):
pass
CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
def load(self):
try:
config.loadFromFile(self.CONFIG_FILE, True)
print "Config file loaded ok..."
except IOError, e:
print "unable to load config (%s), assuming defaults..." % str(e)
def save(self):
# config.save()
config.saveToFile(self.CONFIG_FILE)
def __resolveValue(self, pickles, cmap):
key = pickles[0]
if cmap.has_key(key):
if len(pickles) > 1:
return self.__resolveValue(pickles[1:], cmap[key].dict())
else:
return str(cmap[key].value)
return None
def getResolvedKey(self, key):
names = key.split('.')
if len(names) > 1:
if names[0] == "config":
ret=self.__resolveValue(names[1:], config.content.items)
if ret and len(ret):
return ret
print "getResolvedKey", key, "failed !! (Typo??)"
return ""
def NoSave(element):
element.disableSave()
return element
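# Example (illustrative): NoSave keeps an element usable at runtime but
# prevents it from ever being written to the settings file.
#
#config.misc.standbyCounter = NoSave(ConfigInteger(default = 0))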
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
return args
def updateConfigElement(element, newelement):
newelement.value = element.value
return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
cec_limits = [(0,15),(0,15),(0,15),(0,15)]
class ConfigCECAddress(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = cec_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
| gpl-2.0 |
ZachGoldberg/django-oscar-paypal | docs/conf.py | 9 | 8265 | # -*- coding: utf-8 -*-
#
# django-oscar-paypal documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 22 20:12:56 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, PROJECT_ROOT)
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
)
#os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sandbox.settings')
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-oscar-paypal'
copyright = u'2012, David Winterbottom'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from paypal import VERSION
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
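# Example (illustrative only, not part of this project's configuration) for
# the built-in 'default' theme:
#html_theme_options = {'collapsiblesidebar': True}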
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-oscar-paypaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-oscar-paypal.tex', u'django-oscar-paypal Documentation',
u'David Winterbottom', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-oscar-paypal', u'django-oscar-paypal Documentation',
[u'David Winterbottom'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-oscar-paypal', u'django-oscar-paypal Documentation',
u'David Winterbottom', 'django-oscar-paypal', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause |
untitaker/vdirsyncer | vdirsyncer/sync/__init__.py | 1 | 11708 | # -*- coding: utf-8 -*-
'''
The `sync` function in `vdirsyncer.sync` can be called on two instances of
`Storage` to synchronize them. Apart from the defined errors, this is the only
public API of this module.
The algorithm is based on the blogpost "How OfflineIMAP works" by Edward Z.
Yang: http://blog.ezyang.com/2012/08/how-offlineimap-works/
Some modifications to it are explained in
https://unterwaditzer.net/2016/sync-algorithm.html
'''
import contextlib
import itertools
import logging
from ..exceptions import UserError
from ..utils import uniq
from .status import SubStatus, ItemMetadata
from .exceptions import BothReadOnly, IdentAlreadyExists, PartialSync, \
StorageEmpty, SyncConflict
sync_logger = logging.getLogger(__name__)
class _StorageInfo(object):
'''A wrapper class that holds prefetched items, the status and other
things.'''
def __init__(self, storage, status):
self.storage = storage
self.status = status
self._item_cache = {}
def prepare_new_status(self):
storage_nonempty = False
prefetch = []
def _store_props(ident, props):
try:
self.status.insert_ident(ident, props)
except IdentAlreadyExists as e:
raise e.to_ident_conflict(self.storage)
for href, etag in self.storage.list():
storage_nonempty = True
ident, meta = self.status.get_by_href(href)
if meta is None or meta.href != href or meta.etag != etag:
# Either the item is completely new, or updated
# In both cases we should prefetch
prefetch.append(href)
else:
# Metadata is completely identical
_store_props(ident, meta)
# Prefetch items
for href, item, etag in (self.storage.get_multi(prefetch)
if prefetch else ()):
if not item.is_parseable:
sync_logger.warning(
'Storage "{}": item {} is malformed. '
'Please try to repair it.'
.format(self.storage.instance_name, href)
)
_store_props(item.ident, ItemMetadata(
href=href,
hash=item.hash,
etag=etag
))
self.set_item_cache(item.ident, item)
return storage_nonempty
def is_changed(self, ident):
old_meta = self.status.get(ident)
if old_meta is None: # new item
return True
new_meta = self.status.get_new(ident)
return (
new_meta.etag != old_meta.etag and # etag changed
# item actually changed
(old_meta.hash is None or new_meta.hash != old_meta.hash)
)
def set_item_cache(self, ident, item):
actual_hash = self.status.get_new(ident).hash
assert actual_hash == item.hash
self._item_cache[ident] = item
def get_item_cache(self, ident):
return self._item_cache[ident]
def sync(storage_a, storage_b, status, conflict_resolution=None,
force_delete=False, error_callback=None, partial_sync='revert'):
'''Synchronizes two storages.
:param storage_a: The first storage
:type storage_a: :class:`vdirsyncer.storage.base.Storage`
:param storage_b: The second storage
:type storage_b: :class:`vdirsyncer.storage.base.Storage`
:param status: {ident: (href_a, etag_a, href_b, etag_b)}
metadata about the two storages for detection of changes. Will be
modified by the function and should be passed to it at the next sync.
If this is the first sync, an empty dictionary should be provided.
:param conflict_resolution: A function that, given two conflicting item
versions A and B, returns a new item with conflicts resolved. The UID
must be the same. The strings `"a wins"` and `"b wins"` are also
accepted to mean that side's version will always be taken. If none
is provided, the sync function will raise :py:exc:`SyncConflict`.
:param force_delete: When one storage got completely emptied between two
syncs, :py:exc:`StorageEmpty` is raised for
safety. Setting this parameter to ``True`` disables this safety
measure.
:param error_callback: Instead of raising errors when executing actions,
call the given function with an `Exception` as the only argument.
:param partial_sync: What to do when doing sync actions on read-only
storages.
- ``error``: Raise an error.
- ``ignore``: Those actions are simply skipped.
- ``revert`` (default): Revert changes on other side.
'''
if storage_a.read_only and storage_b.read_only:
raise BothReadOnly()
if conflict_resolution == 'a wins':
conflict_resolution = lambda a, b: a
elif conflict_resolution == 'b wins':
conflict_resolution = lambda a, b: b
status_nonempty = bool(next(status.iter_old(), None))
with status.transaction():
a_info = _StorageInfo(storage_a, SubStatus(status, 'a'))
b_info = _StorageInfo(storage_b, SubStatus(status, 'b'))
a_nonempty = a_info.prepare_new_status()
b_nonempty = b_info.prepare_new_status()
if status_nonempty and not force_delete:
if a_nonempty and not b_nonempty:
raise StorageEmpty(empty_storage=storage_b)
elif not a_nonempty and b_nonempty:
raise StorageEmpty(empty_storage=storage_a)
actions = list(_get_actions(a_info, b_info))
storage_a.buffered()
storage_b.buffered()
for action in actions:
try:
action.run(
a_info,
b_info,
conflict_resolution,
partial_sync
)
except Exception as e:
if error_callback:
error_callback(e)
else:
raise
storage_a.flush()
storage_b.flush()
class Action:
def _run_impl(self, a, b): # pragma: no cover
raise NotImplementedError()
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
if self.dest.storage.read_only:
if partial_sync == 'error':
raise PartialSync(self.dest.storage)
elif partial_sync == 'ignore':
self.rollback(a, b)
return
else:
assert partial_sync == 'revert'
self._run_impl(a, b)
@contextlib.contextmanager
def auto_rollback(self, a, b):
try:
yield
except BaseException as e:
self.rollback(a, b)
raise e
def rollback(self, a, b):
a.status.parent.rollback(self.ident)
class Upload(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
href = etag = None
else:
sync_logger.info(u'Copying (uploading) item {} to {}'
.format(self.ident, self.dest.storage))
href, etag = self.dest.storage.upload(self.item)
assert href is not None
self.dest.status.insert_ident(self.ident, ItemMetadata(
href=href,
hash=self.item.hash,
etag=etag
))
class Update(Action):
def __init__(self, item, dest):
self.item = item
self.ident = item.ident
self.dest = dest
def _run_impl(self, a, b):
if self.dest.storage.read_only:
meta = ItemMetadata(hash=self.item.hash)
else:
sync_logger.info(u'Copying (updating) item {} to {}'
.format(self.ident, self.dest.storage))
meta = self.dest.status.get_new(self.ident)
meta.etag = \
self.dest.storage.update(meta.href, self.item, meta.etag)
self.dest.status.update_ident(self.ident, meta)
class Delete(Action):
def __init__(self, ident, dest):
self.ident = ident
self.dest = dest
def _run_impl(self, a, b):
meta = self.dest.status.get_new(self.ident)
if not self.dest.storage.read_only:
sync_logger.info(u'Deleting item {} from {}'
.format(self.ident, self.dest.storage))
self.dest.storage.delete(meta.href, meta.etag)
self.dest.status.remove_ident(self.ident)
class ResolveConflict(Action):
def __init__(self, ident):
self.ident = ident
def run(self, a, b, conflict_resolution, partial_sync):
with self.auto_rollback(a, b):
sync_logger.info(u'Doing conflict resolution for item {}...'
.format(self.ident))
meta_a = a.status.get_new(self.ident)
meta_b = b.status.get_new(self.ident)
if meta_a.hash == meta_b.hash:
sync_logger.info(u'...same content on both sides.')
elif conflict_resolution is None:
raise SyncConflict(ident=self.ident, href_a=meta_a.href,
href_b=meta_b.href)
elif callable(conflict_resolution):
item_a = a.get_item_cache(self.ident)
item_b = b.get_item_cache(self.ident)
new_item = conflict_resolution(item_a, item_b)
if new_item.hash != meta_a.hash:
Update(new_item, a).run(a, b, conflict_resolution,
partial_sync)
if new_item.hash != meta_b.hash:
Update(new_item, b).run(a, b, conflict_resolution,
partial_sync)
else:
raise UserError('Invalid conflict resolution mode: {!r}'
.format(conflict_resolution))
def _get_actions(a_info, b_info):
for ident in uniq(itertools.chain(a_info.status.parent.iter_new(),
a_info.status.parent.iter_old())):
a = a_info.status.get_new(ident)
b = b_info.status.get_new(ident)
if a and b:
a_changed = a_info.is_changed(ident)
b_changed = b_info.is_changed(ident)
if a_changed and b_changed:
# item was modified on both sides
# OR: missing status
yield ResolveConflict(ident)
elif a_changed and not b_changed:
# item was only modified in a
yield Update(a_info.get_item_cache(ident), b_info)
elif not a_changed and b_changed:
# item was only modified in b
yield Update(b_info.get_item_cache(ident), a_info)
elif a and not b:
if a_info.is_changed(ident):
# was deleted from b but modified on a
# OR: new item was created in a
yield Upload(a_info.get_item_cache(ident), b_info)
else:
# was deleted from b and not modified on a
yield Delete(ident, a_info)
elif not a and b:
if b_info.is_changed(ident):
# was deleted from a but modified on b
# OR: new item was created in b
yield Upload(b_info.get_item_cache(ident), a_info)
else:
# was deleted from a and not changed on b
yield Delete(ident, b_info)
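# Illustrative sketch, not part of the original module: the intended calling
# shape of sync() as documented above. storage_a, storage_b and status are
# assumed to be real vdirsyncer Storage instances and a status backend; a
# custom conflict resolution callable just has to return an item with the
# same UID.
def _example_sync(storage_a, storage_b, status):
    def prefer_side_a(item_a, item_b):
        # any deterministic merge is fine as long as the UID stays the same
        return item_a

    sync(storage_a, storage_b, status,
         conflict_resolution=prefer_side_a,  # or the strings 'a wins' / 'b wins'
         partial_sync='revert')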
| mit |
ravindrapanda/tensorflow | tensorflow/python/profiler/tfprof_logger_test.py | 52 | 2982 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class TFProfLoggerTest(test.TestCase):
def _BuildSmallPlaceholderModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This this out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
a, b, y = self._BuildSmallPlaceholderModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEquals('<unknown>', str(y2.get_shape()))
# run_metadata has special name for MatMul, hence failed to fill shape.
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEquals('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
| apache-2.0 |
YOTOV-LIMITED/kuma | vendor/packages/pygments/lexers/c_cpp.py | 72 | 9415 | # -*- coding: utf-8 -*-
"""
pygments.lexers.c_cpp
~~~~~~~~~~~~~~~~~~~~~
Lexers for C/C++ languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, using, \
this, inherit, default, words
from pygments.util import get_bool_opt
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CLexer', 'CppLexer']
class CFamilyLexer(RegexLexer):
"""
For C family source code. This is used as a base class to avoid repetitious
definitions.
"""
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
#: only one /* */ style comment
_ws1 = r'\s*(?:/[*].*?[*]/\s*)*'
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
('^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws1 + r')(#if\s+0)',
bygroups(using(this), Comment.Preproc), 'if0'),
('^(' + _ws1 + ')(#)',
bygroups(using(this), Comment.Preproc), 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'statements': [
(r'L?"', String, 'string'),
(r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(words(('auto', 'break', 'case', 'const', 'continue', 'default', 'do',
'else', 'enum', 'extern', 'for', 'goto', 'if', 'register',
'restricted', 'return', 'sizeof', 'static', 'struct',
'switch', 'typedef', 'union', 'volatile', 'while'),
suffix=r'\b'), Keyword),
(r'(bool|int|long|float|short|double|char|unsigned|signed|void|'
r'[a-z_][a-z0-9_]*_t)\b',
Keyword.Type),
(words(('inline', '_inline', '__inline', 'naked', 'restrict',
'thread', 'typename'), suffix=r'\b'), Keyword.Reserved),
# Vector intrinsics
(r'(__m(128i|128d|128|64))\b', Keyword.Reserved),
# Microsoft-isms
(words((
'asm', 'int8', 'based', 'except', 'int16', 'stdcall', 'cdecl',
'fastcall', 'int32', 'declspec', 'finally', 'int64', 'try',
'leave', 'wchar_t', 'w64', 'unaligned', 'raise', 'noop',
'identifier', 'forceinline', 'assume'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
(r'(true|false|NULL)\b', Name.Builtin),
(r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
('[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|[*]))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')?(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#el(?:se|if).*\n', Comment.Preproc, '#pop'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
stdlib_types = ['size_t', 'ssize_t', 'off_t', 'wchar_t', 'ptrdiff_t',
'sig_atomic_t', 'fpos_t', 'clock_t', 'time_t', 'va_list',
'jmp_buf', 'FILE', 'DIR', 'div_t', 'ldiv_t', 'mbstate_t',
'wctrans_t', 'wint_t', 'wctype_t']
c99_types = ['_Bool', '_Complex', 'int8_t', 'int16_t', 'int32_t', 'int64_t',
'uint8_t', 'uint16_t', 'uint32_t', 'uint64_t', 'int_least8_t',
'int_least16_t', 'int_least32_t', 'int_least64_t',
'uint_least8_t', 'uint_least16_t', 'uint_least32_t',
'uint_least64_t', 'int_fast8_t', 'int_fast16_t', 'int_fast32_t',
'int_fast64_t', 'uint_fast8_t', 'uint_fast16_t', 'uint_fast32_t',
'uint_fast64_t', 'intptr_t', 'uintptr_t', 'intmax_t',
'uintmax_t']
def __init__(self, **options):
self.stdlibhighlighting = get_bool_opt(options, 'stdlibhighlighting', True)
self.c99highlighting = get_bool_opt(options, 'c99highlighting', True)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if self.stdlibhighlighting and value in self.stdlib_types:
token = Keyword.Type
elif self.c99highlighting and value in self.c99_types:
token = Keyword.Type
yield index, token, value
class CLexer(CFamilyLexer):
"""
For C source code with preprocessor directives.
"""
name = 'C'
aliases = ['c']
filenames = ['*.c', '*.h', '*.idc']
mimetypes = ['text/x-chdr', 'text/x-csrc']
priority = 0.1
def analyse_text(text):
if re.search('^\s*#include [<"]', text, re.MULTILINE):
return 0.1
if re.search('^\s*#ifdef ', text, re.MULTILINE):
return 0.1
class CppLexer(CFamilyLexer):
"""
For C++ source code with preprocessor directives.
"""
name = 'C++'
aliases = ['cpp', 'c++']
filenames = ['*.cpp', '*.hpp', '*.c++', '*.h++',
'*.cc', '*.hh', '*.cxx', '*.hxx',
'*.C', '*.H', '*.cp', '*.CPP']
mimetypes = ['text/x-c++hdr', 'text/x-c++src']
priority = 0.1
tokens = {
'statements': [
(words((
'asm', 'catch', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
'export', 'friend', 'mutable', 'namespace', 'new', 'operator',
'private', 'protected', 'public', 'reinterpret_cast',
'restrict', 'static_cast', 'template', 'this', 'throw', 'throws',
'typeid', 'typename', 'using', 'virtual',
'constexpr', 'nullptr', 'decltype', 'thread_local',
'alignas', 'alignof', 'static_assert', 'noexcept', 'override',
'final'), suffix=r'\b'), Keyword),
(r'char(16_t|32_t)\b', Keyword.Type),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
inherit,
],
'root': [
inherit,
# C++ Microsoft-isms
(words(('virtual_inheritance', 'uuidof', 'super', 'single_inheritance',
'multiple_inheritance', 'interface', 'event'),
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
# Offload C++ extensions, http://offload.codeplay.com/
(r'__(offload|blockingoffload|outer)\b', Keyword.Pseudo),
],
'classname': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# template specification
(r'\s*(?=>)', Text, '#pop'),
],
}
def analyse_text(text):
if re.search('#include <[a-z]+>', text):
return 0.2
if re.search('using namespace ', text):
return 0.4
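# Illustrative sketch, not part of the original module: running the lexers
# defined above through the standard Pygments entry point. highlight() and
# HtmlFormatter are part of the public Pygments API.
def _example_highlight():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    code = 'int main(void) {\n    return 0;\n}\n'
    return highlight(code, CLexer(), HtmlFormatter())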
| mpl-2.0 |
automl/SpySMAC | cave/analyzer/base_analyzer.py | 1 | 5492 | import logging
from collections import OrderedDict
from bokeh.io import output_notebook
from bokeh.plotting import show
from cave.html.html_builder import HTMLBuilder
from cave.reader.runs_container import RunsContainer
from cave.utils.exceptions import Deactivated
class BaseAnalyzer(object):
"""
The base class for analyzing methods. To create a new analyzer, inherit from this class and extend.
If you already have an analyzer, but need a wrapper to call it, also inherit it from this class.
You should overwrite the "get_name"-method.
Currently the initialization calls the analysis. After the analyzer ran, the results should be saved to the member
self.result, which is a dictionary with a defined structure.
The docstrings (this part) will be used to display a tooltip / help for the analyzer, so they should be a descriptive
and concise small paragraph describing the analyzer and its results.
Remember to call super().__init__(runscontainer) in your analyzer's __init__-method. This will initialize the logger,
name and important attributes.
All configurator data is available via the self.runscontainer.
"""
def __init__(self,
runscontainer: RunsContainer,
*args,
**kwargs):
"""
runscontainer: RunsContainer
contains all important information about the configurator runs
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.name = self.get_name()
self.logger.debug("Initializing %s", self.name)
self.runscontainer = runscontainer
self.result = OrderedDict()
self.error = False
options = self.runscontainer.analyzing_options
if self.name not in options.sections():
self.logger.warning("Please state in the analyzing options whether or not to run this Analyzer "
"(simply add a line to the .ini file containing [{}])".format(self.name))
elif not options[self.name].getboolean('run'):
raise Deactivated("{0} has been deactivated in the options. To enable, just set "
"[{0}][run] = True in the .ini file or pass the appropriate flags.".format(self.name))
self.options = options[self.name]
for k, v in kwargs.items():
if v is not None:
self.options[k] = v
self.logger.debug("{} initialized with options: {}".format(self.name, str(dict(self.options))))
def plot_bokeh(self):
"""
This function should recreate the bokeh-plot from scratch with as little overhead as possible. This is needed to
show the bokeh plot in jupyter AND save it to the webpage. The bokeh plot needs to be recreated to be displayed
in different outputs for reasons beyond our control. So save all analysis results in the class and simply redo
the plotting with this function.
This function needs to be called if bokeh-plots are to be displayed in notebook AND saved to html-result.
"""
raise NotImplementedError()
def get_html(self, d=None, tooltip=None) -> str:
"""General reports in html-format, to be easily integrated in html-code. WORKS ALSO FOR BOKEH-OUTPUT.
Parameters
----------
d: Dictionary
a dictionary that will be later turned into a website
tooltip: string
tooltip to be displayed in report. optional, will overwrite the docstrings that are used by default.
Returns
-------
html: str
the script and div parts of the html-code, joined into a single string
"""
if len(self.result) == 1 and None in self.result:
self.logger.debug("Detected None-key, abstracting away...")
self.result = self.result[None]
if d is not None:
d[self.name] = self.result
d[self.name]['tooltip'] = tooltip if tooltip is not None else self.__doc__
script, div = HTMLBuilder("", "", "").add_layer(None, self.result)
combine = "\n\n".join([script, div])
return combine
def get_jupyter(self):
"""Depending on analysis, this creates jupyter-notebook compatible output."""
bokeh_plots = self.check_for_bokeh(self.result)
if bokeh_plots:
self.logger.warning("Bokeh plots cannot be re-used for notebook if they've already been \"components\"'ed. "
"To be sure, get_jupyter should be overwritten for bokeh-producing analyzers.")
output_notebook()
for bokeh_plot in bokeh_plots:
show(bokeh_plot)
else:
from IPython.core.display import HTML, display
display(HTML(self.get_html()))
@classmethod
def check_for_bokeh(cls, d):
"""
Check if there is bokeh-plots in the output of this analyzer by checking the result-dictionary for the bokeh
keyword.
"""
result = [] # all bokeh models
for k, v in d.items():
if isinstance(v, dict):
res = cls.check_for_bokeh(v)
if res:
result.extend(res)
if k == 'bokeh':
result.append(v)
return result
def get_name(self):
return self.__class__.__name__ # Back-up, can be overwritten, will be used as a name for analysis
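# Illustrative sketch, not part of CAVE: the minimal shape of a custom
# analyzer as described in the BaseAnalyzer docstring above. The analysis
# itself (storing a single entry in self.result) is a stand-in.
class _ExampleAnalyzer(BaseAnalyzer):
    def __init__(self, runscontainer):
        super().__init__(runscontainer)  # sets up logger, name and options
        # results are collected in the (possibly nested) result-dictionary
        self.result['Example'] = {'table': '<p>placeholder analysis</p>'}

    def get_name(self):
        return 'Example Analyzer'  # used as section title and options key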
| bsd-3-clause |
lizardsystem/lizard5-apps | lizard_rainapp/views.py | 1 | 5811 | # (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
# -*- coding: utf-8 -*-
"""Views for the RainApp, mostly a page to upload new region shapefiles."""
# Python 3 is coming
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import io
import logging
import operator
import os
import shutil
import tempfile
import zipfile
from functools import reduce  # 'reduce' is not a builtin under Python 3
import shapefile
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.views.generic import View
from lizard_ui.views import ViewContextMixin
from . import forms
from . import models
logger = logging.getLogger(__name__)
class AdminView(ViewContextMixin, TemplateView):
template_name = "lizard_rainapp/admin.html"
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(AdminView, self).dispatch(request, *args, **kwargs)
def get(self, request):
self.form = forms.UploadShapefileForm()
return super(AdminView, self).get(request)
def post(self, request):
self.form = forms.UploadShapefileForm(
request.POST, request.FILES)
if not self.form.is_valid():
return super(AdminView, self).get(request)
try:
self.save_shape()
finally:
self.form.clean_temporary_directory()
return HttpResponseRedirect(
reverse("lizard_rainapp_admin"))
def get_field(self, feature, fieldname, default=None):
try:
name = self.form.cleaned_data[fieldname]
return feature.GetField(
feature.GetFieldIndex(name.encode('utf8')))
except ValueError:
return default
def save_shape(self):
rainappconfig = self.form.cleaned_data['config']
# First, delete old data
models.GeoObject.objects.filter(
config=rainappconfig).delete()
shape = self.form.open_shapefile()
layer = shape.GetLayer()
num_features = 0
for feature in layer:
geom = feature.GetGeometryRef()
models.GeoObject.objects.create(
municipality_id=self.get_field(feature, 'id_field'),
name=self.get_field(feature, 'name_field'),
x=self.get_field(feature, 'x_field'),
y=self.get_field(feature, 'y_field'),
area=self.get_field(feature, 'area_field'),
geometry=GEOSGeometry(geom.ExportToWkt(), srid=4326),
config=rainappconfig)
num_features += 1
logger.debug("Added {} features.".format(num_features))
def rainapp_configs(self):
return models.RainappConfig.objects.all()
class DownloadShapeView(View):
def dispatch(self, request, *args, **kwargs):
if not request.user.has_perm('lizard_rainapp.change_geoobject'):
raise PermissionDenied()
return super(DownloadShapeView, self).dispatch(
request, *args, **kwargs)
def get(self, request, slug):
try:
rainappconfig = models.RainappConfig.objects.get(
slug=slug)
except models.RainappConfig.DoesNotExist:
raise Http404()
if not rainappconfig.has_geoobjects:
raise Http404()
bytebuffer = self.save_data_to_zip(rainappconfig)
# Setup HTTPResponse for returning a zip file
response = HttpResponse(content_type='application/zip')
response['Content-Disposition'] = (
'attachment; filename={}.zip'.format(slug))
response.write(bytebuffer.read())
return response
def save_data_to_zip(self, rainappconfig):
# Save a shapefile to a temp directory
temp_dir = tempfile.mkdtemp()
try:
shapefile_path = os.path.join(
temp_dir, rainappconfig.slug)
shp = shapefile.Writer(shapefile.POLYGON)
shp.field(b'ID_NS')
shp.field(b'ID')
shp.field(b'X', b'F', 11, 5)
shp.field(b'Y', b'F', 11, 5)
shp.field(b'AREA', b'F', 11, 5)
for geo in models.GeoObject.objects.filter(config=rainappconfig):
if str(geo.geometry).startswith('MULTIPOLYGON'):
# For pyshp, multipolygons are basically normal
# polygons with all the parts after each other. Meaning
# we need to add them together them by hand.
geometry = [
[list(l) for l in polygon] for polygon in geo.geometry]
geometry = reduce(operator.add, geometry, [])
else:
geometry = [list(l) for l in geo.geometry]
shp.poly(parts=geometry)
shp.record(
geo.municipality_id,
geo.name,
geo.x,
geo.y,
geo.area)
shp.save(shapefile_path)
# Create a zipfile in a BytesIO buffer
bytebuffer = io.BytesIO()
zipf = zipfile.ZipFile(bytebuffer, 'w', zipfile.ZIP_DEFLATED)
for filename in os.listdir(temp_dir):
zipf.write(os.path.join(temp_dir, filename), filename)
zipf.close()
bytebuffer.seek(0)
return bytebuffer
finally:
# Remove temporary directory
shutil.rmtree(temp_dir)
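# Illustrative sketch, not part of the original module: the multipolygon
# flattening used in save_data_to_zip() above, shown in isolation. pyshp has
# no separate multipolygon type, so the rings of all member polygons are
# passed to shp.poly() as one flat list of parts.
def _example_flatten_multipolygon(geos_multipolygon):
    parts = [[list(ring) for ring in polygon] for polygon in geos_multipolygon]
    return reduce(operator.add, parts, [])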
| lgpl-3.0 |
mrshelly/openerp71313 | openerp/modules/loading.py | 1 | 19716 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules (also called addons) management.
"""
import itertools
import logging
import os
import sys
import threading
import openerp
import openerp.modules.db
import openerp.modules.graph
import openerp.modules.migration
import openerp.osv as osv
import openerp.pooler as pooler
import openerp.release as release
import openerp.tools as tools
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
from openerp.modules.module import initialize_sys_path, \
load_openerp_module, init_module_models
_logger = logging.getLogger(__name__)
def open_openerp_namespace():
# See the comment on openerp.conf.deprecation.open_openerp_namespace.
if openerp.conf.deprecation.open_openerp_namespace:
for k, v in list(sys.modules.items()):
if k.startswith('openerp.') and sys.modules.get(k[8:]) is None:
sys.modules[k[8:]] = v
def load_module_graph(cr, graph, status=None, perform_checks=True, skip_modules=None, report=None):
"""Migrates+Updates or Installs all module nodes from ``graph``
:param graph: graph of module nodes to load
:param status: status dictionary for keeping track of progress
:param perform_checks: whether module descriptors should be checked for validity (prints warnings
for some cases)
:param skip_modules: optional list of module names (packages) which have previously been loaded and can be skipped
:return: list of modules that were installed or updated
"""
def process_sql_file(cr, fp):
queries = fp.read().split(';')
for query in queries:
new_query = ' '.join(query.split())
if new_query:
cr.execute(new_query)
load_init_xml = lambda *args: _load_data(cr, *args, kind='init_xml')
load_update_xml = lambda *args: _load_data(cr, *args, kind='update_xml')
load_demo_xml = lambda *args: _load_data(cr, *args, kind='demo_xml')
load_data = lambda *args: _load_data(cr, *args, kind='data')
load_demo = lambda *args: _load_data(cr, *args, kind='demo')
def load_test(module_name, idref, mode):
cr.commit()
try:
threading.currentThread().testing = True
_load_data(cr, module_name, idref, mode, 'test')
return True
except Exception:
_logger.exception(
'module %s: an exception occurred in a test', module_name)
return False
finally:
threading.currentThread().testing = False
if tools.config.options['test_commit']:
cr.commit()
else:
cr.rollback()
def _load_data(cr, module_name, idref, mode, kind):
"""
kind: data, demo, test, init_xml, update_xml, demo_xml.
noupdate is False, unless it is demo data or it is csv data in
init mode.
"""
for filename in package.data[kind]:
if kind == 'test':
_logger.log(logging.TEST, "module %s: loading %s", module_name, filename)
else:
_logger.info("module %s: loading %s", module_name, filename)
_, ext = os.path.splitext(filename)
pathname = os.path.join(module_name, filename)
fp = tools.file_open(pathname)
noupdate = False
if kind in ('demo', 'demo_xml'):
noupdate = True
try:
ext = ext.lower()
if ext == '.csv':
if kind in ('init', 'init_xml'):
noupdate = True
tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
elif ext == '.sql':
process_sql_file(cr, fp)
elif ext == '.yml':
tools.convert_yaml_import(cr, module_name, fp, kind, idref, mode, noupdate, report)
elif ext == '.xml':
tools.convert_xml_import(cr, module_name, fp, idref, mode, noupdate, report)
elif ext == '.js':
pass # .js files are valid but ignored here.
else:
_logger.warning("Can't load unknown file type %s.", filename)
finally:
fp.close()
if status is None:
status = {}
processed_modules = []
loaded_modules = []
pool = pooler.get_pool(cr.dbname)
migrations = openerp.modules.migration.MigrationManager(cr, graph)
_logger.debug('loading %d packages...', len(graph))
# Query manual fields for all models at once and save them on the registry
# so the initialization code for each model does not have to do it
# one model at a time.
pool.fields_by_model = {}
cr.execute('SELECT * FROM ir_model_fields WHERE state=%s', ('manual',))
for field in cr.dictfetchall():
pool.fields_by_model.setdefault(field['model'], []).append(field)
# register, instantiate and initialize models for each modules
for index, package in enumerate(graph):
module_name = package.name
module_id = package.id
if skip_modules and module_name in skip_modules:
continue
_logger.info('module %s: loading objects', package.name)
migrations.migrate_module(package, 'pre')
load_openerp_module(package.name)
models = pool.load(cr, package)
loaded_modules.append(package.name)
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
init_module_models(cr, package.name, models)
pool._init_modules.add(package.name)
status['progress'] = float(index) / len(graph)
# Can't put this line out of the loop: ir.module.module will be
# registered by init_module_models() above.
modobj = pool.get('ir.module.module')
if perform_checks:
modobj.check(cr, SUPERUSER_ID, [module_id])
idref = {}
mode = 'update'
if hasattr(package, 'init') or package.state == 'to install':
mode = 'init'
if hasattr(package, 'init') or hasattr(package, 'update') or package.state in ('to install', 'to upgrade'):
if package.state=='to upgrade':
# upgrading the module information
modobj.write(cr, SUPERUSER_ID, [module_id], modobj.get_values_from_terp(package.data))
load_init_xml(module_name, idref, mode)
load_update_xml(module_name, idref, mode)
load_data(module_name, idref, mode)
if hasattr(package, 'demo') or (package.dbdemo and package.state != 'installed'):
status['progress'] = (index + 0.75) / len(graph)
load_demo_xml(module_name, idref, mode)
load_demo(module_name, idref, mode)
cr.execute('update ir_module_module set demo=%s where id=%s', (True, module_id))
# launch tests only in demo mode, as most tests will depend
# on demo data. Other tests can be added into the regular
# 'data' section, but should probably not alter the data,
# as there is no rollback.
if tools.config.options['test_enable']:
report.record_result(load_test(module_name, idref, mode))
# Run the `fast_suite` and `checks` tests given by the module.
if module_name == 'base':
# Also run the core tests after the database is created.
report.record_result(openerp.modules.module.run_unit_tests('openerp'))
report.record_result(openerp.modules.module.run_unit_tests(module_name))
processed_modules.append(package.name)
migrations.migrate_module(package, 'post')
ver = release.major_version + '.' + package.data['version']
# Set new modules and dependencies
modobj.write(cr, SUPERUSER_ID, [module_id], {'state': 'installed', 'latest_version': ver})
# Update translations for all installed languages
modobj.update_translations(cr, SUPERUSER_ID, [module_id], None)
package.state = 'installed'
for kind in ('init', 'demo', 'update'):
if hasattr(package, kind):
delattr(package, kind)
cr.commit()
# The query won't be valid for models created later (i.e. custom model
# created after the registry has been loaded), so empty its result.
pool.fields_by_model = None
cr.commit()
return loaded_modules, processed_modules
def _check_module_names(cr, module_names):
mod_names = set(module_names)
if 'base' in mod_names:
# ignore dummy 'all' module
if 'all' in mod_names:
mod_names.remove('all')
if mod_names:
cr.execute("SELECT count(id) AS count FROM ir_module_module WHERE name in %s", (tuple(mod_names),))
if cr.dictfetchone()['count'] != len(mod_names):
# find out what module name(s) are incorrect:
cr.execute("SELECT name FROM ir_module_module")
incorrect_names = mod_names.difference([x['name'] for x in cr.dictfetchall()])
_logger.warning('invalid module names, ignored: %s', ", ".join(incorrect_names))
def load_marked_modules(cr, graph, states, force, progressdict, report, loaded_modules, perform_checks):
"""Loads modules marked with ``states``, adding them to ``graph`` and
``loaded_modules`` and returns a list of installed/upgraded modules."""
processed_modules = []
while True:
cr.execute("SELECT name from ir_module_module WHERE state IN %s" ,(tuple(states),))
module_list = [name for (name,) in cr.fetchall() if name not in graph]
graph.add_modules(cr, module_list, force)
_logger.debug('Updating graph with %d more modules', len(module_list))
loaded, processed = load_module_graph(cr, graph, progressdict, report=report, skip_modules=loaded_modules, perform_checks=perform_checks)
processed_modules.extend(processed)
loaded_modules.extend(loaded)
if not processed: break
return processed_modules
def load_modules(db, force_demo=False, status=None, update_module=False):
# TODO status['progress'] reporting is broken: used twice (and reset each
# time to zero) in load_module_graph, not fine-grained enough.
# It should be a method exposed by the pool.
initialize_sys_path()
open_openerp_namespace()
force = []
if force_demo:
force.append('demo')
cr = db.cursor()
try:
if not openerp.modules.db.is_initialized(cr):
_logger.info("init db")
openerp.modules.db.initialize(cr)
tools.config["init"]["all"] = 1
tools.config['update']['all'] = 1
if not tools.config['without_demo']:
tools.config["demo"]['all'] = 1
# This is a brand new pool, just created in pooler.get_db_and_pool()
pool = pooler.get_pool(cr.dbname)
if 'base' in tools.config['update'] or 'all' in tools.config['update']:
cr.execute("update ir_module_module set state=%s where name=%s and state=%s", ('to upgrade', 'base', 'installed'))
# STEP 1: LOAD BASE (must be done before module dependencies can be computed for later steps)
graph = openerp.modules.graph.Graph()
graph.add_module(cr, 'base', force)
if not graph:
_logger.critical('module base cannot be loaded! (hint: verify addons-path)')
raise osv.osv.except_osv(_('Could not load base module'), _('module base cannot be loaded! (hint: verify addons-path)'))
# processed_modules: for cleanup step after install
# loaded_modules: to avoid double loading
report = pool._assertion_report
loaded_modules, processed_modules = load_module_graph(cr, graph, status, perform_checks=update_module, report=report)
if tools.config['load_language']:
for lang in tools.config['load_language'].split(','):
tools.load_language(cr, lang)
# STEP 2: Mark other modules to be loaded/updated
if update_module:
modobj = pool.get('ir.module.module')
if ('base' in tools.config['init']) or ('base' in tools.config['update']):
_logger.info('updating modules list')
modobj.update_list(cr, SUPERUSER_ID)
_check_module_names(cr, itertools.chain(tools.config['init'].keys(), tools.config['update'].keys()))
mods = [k for k in tools.config['init'] if tools.config['init'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'uninstalled'), ('name', 'in', mods)])
if ids:
modobj.button_install(cr, SUPERUSER_ID, ids)
mods = [k for k in tools.config['update'] if tools.config['update'][k]]
if mods:
ids = modobj.search(cr, SUPERUSER_ID, ['&', ('state', '=', 'installed'), ('name', 'in', mods)])
if ids:
modobj.button_upgrade(cr, SUPERUSER_ID, ids)
cr.execute("update ir_module_module set state=%s where name=%s", ('installed', 'base'))
# STEP 3: Load marked modules (skipping base which was done in STEP 1)
# IMPORTANT: this is done in two parts, first loading all installed or
# partially installed modules (i.e. installed/to upgrade), to
# offer a consistent system to the second part: installing
# newly selected modules.
# We include the modules 'to remove' in the first step, because
# they are part of the "currently installed" modules. They will
# be dropped in STEP 6 later, before restarting the loading
# process.
states_to_load = ['installed', 'to upgrade', 'to remove']
processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules, update_module)
processed_modules.extend(processed)
if update_module:
states_to_load = ['to install']
processed = load_marked_modules(cr, graph, states_to_load, force, status, report, loaded_modules, update_module)
processed_modules.extend(processed)
# load custom models
cr.execute('select model from ir_model where state=%s', ('manual',))
for model in cr.dictfetchall():
pool.get('ir.model').instanciate(cr, SUPERUSER_ID, model['model'], {})
# STEP 4: Finish and cleanup installations
if processed_modules:
cr.execute("""select model,name from ir_model where id NOT IN (select distinct model_id from ir_model_access)""")
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and not model_obj.is_transient():
_logger.warning('The model %s has no access rules, consider adding one. E.g. access_%s,access_%s,model_%s,,1,1,1,1',
model, model.replace('.', '_'), model.replace('.', '_'), model.replace('.', '_'))
# Temporary warning while we remove access rights on osv_memory objects, as they have
# been replaced by owner-only access rights
cr.execute("""select distinct mod.model, mod.name from ir_model_access acc, ir_model mod where acc.model_id = mod.id""")
for (model, name) in cr.fetchall():
model_obj = pool.get(model)
if model_obj and model_obj.is_transient():
_logger.warning('The transient model %s (%s) should not have explicit access rules!', model, name)
cr.execute("SELECT model from ir_model")
for (model,) in cr.fetchall():
obj = pool.get(model)
if obj:
obj._check_removed_columns(cr, log=True)
else:
_logger.warning("Model %s is declared but cannot be loaded! (Perhaps a module was partially removed or renamed)", model)
# Cleanup orphan records
pool.get('ir.model.data')._process_end(cr, SUPERUSER_ID, processed_modules)
for kind in ('init', 'demo', 'update'):
tools.config[kind] = {}
cr.commit()
# STEP 5: Cleanup menus
# Remove menu items that are not referenced by any of other
# (child) menu item, ir_values, or ir_model_data.
# TODO: This code could be a method of ir_ui_menu. Remove menu without actions of children
if update_module:
while True:
cr.execute('''delete from
ir_ui_menu
where
(id not IN (select parent_id from ir_ui_menu where parent_id is not null))
and
(id not IN (select res_id from ir_values where model='ir.ui.menu'))
and
(id not IN (select res_id from ir_model_data where model='ir.ui.menu'))''')
cr.commit()
if not cr.rowcount:
break
else:
_logger.info('removed %d unused menus', cr.rowcount)
# STEP 6: Uninstall modules to remove
if update_module:
# Remove records referenced from ir_model_data for modules to be
# removed (and removed the references from ir_model_data).
cr.execute("SELECT id FROM ir_module_module WHERE state=%s", ('to remove',))
mod_ids_to_remove = [x[0] for x in cr.fetchall()]
if mod_ids_to_remove:
pool.get('ir.module.module').module_uninstall(cr, SUPERUSER_ID, mod_ids_to_remove)
# Recursive reload, should only happen once, because there should be no
# modules to remove next time
cr.commit()
_logger.info('Reloading registry once more after uninstalling modules')
return pooler.restart_pool(cr.dbname, force_demo, status, update_module)
if report.failures:
_logger.error('At least one test failed when loading the modules.')
else:
_logger.info('Modules loaded.')
# STEP 7: call _register_hook on every model
for model in pool.models.values():
model._register_hook(cr)
finally:
cr.close()
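# Illustrative sketch, not part of the original module: the typical way this
# loader is reached. pooler.get_db() is assumed to hand back the database
# object that load_modules() expects.
def _example_load(db_name):
    db = pooler.get_db(db_name)
    load_modules(db, force_demo=False, update_module=True)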
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
negativo/magic-wormhole | src/wormhole/wordlist.py | 5 | 9117 |
# The PGP Word List, which maps bytes to phonetically-distinct words. There
# are two lists, even and odd, and encodings should alternate between then to
# detect dropped words. https://en.wikipedia.org/wiki/PGP_Words
# Thanks to Warren Guy for transcribing them:
# https://github.com/warrenguy/javascript-pgp-word-list
from binascii import unhexlify
raw_words = {
'00': ['aardvark', 'adroitness'], '01': ['absurd', 'adviser'],
'02': ['accrue', 'aftermath'], '03': ['acme', 'aggregate'],
'04': ['adrift', 'alkali'], '05': ['adult', 'almighty'],
'06': ['afflict', 'amulet'], '07': ['ahead', 'amusement'],
'08': ['aimless', 'antenna'], '09': ['Algol', 'applicant'],
'0A': ['allow', 'Apollo'], '0B': ['alone', 'armistice'],
'0C': ['ammo', 'article'], '0D': ['ancient', 'asteroid'],
'0E': ['apple', 'Atlantic'], '0F': ['artist', 'atmosphere'],
'10': ['assume', 'autopsy'], '11': ['Athens', 'Babylon'],
'12': ['atlas', 'backwater'], '13': ['Aztec', 'barbecue'],
'14': ['baboon', 'belowground'], '15': ['backfield', 'bifocals'],
'16': ['backward', 'bodyguard'], '17': ['banjo', 'bookseller'],
'18': ['beaming', 'borderline'], '19': ['bedlamp', 'bottomless'],
'1A': ['beehive', 'Bradbury'], '1B': ['beeswax', 'bravado'],
'1C': ['befriend', 'Brazilian'], '1D': ['Belfast', 'breakaway'],
'1E': ['berserk', 'Burlington'], '1F': ['billiard', 'businessman'],
'20': ['bison', 'butterfat'], '21': ['blackjack', 'Camelot'],
'22': ['blockade', 'candidate'], '23': ['blowtorch', 'cannonball'],
'24': ['bluebird', 'Capricorn'], '25': ['bombast', 'caravan'],
'26': ['bookshelf', 'caretaker'], '27': ['brackish', 'celebrate'],
'28': ['breadline', 'cellulose'], '29': ['breakup', 'certify'],
'2A': ['brickyard', 'chambermaid'], '2B': ['briefcase', 'Cherokee'],
'2C': ['Burbank', 'Chicago'], '2D': ['button', 'clergyman'],
'2E': ['buzzard', 'coherence'], '2F': ['cement', 'combustion'],
'30': ['chairlift', 'commando'], '31': ['chatter', 'company'],
'32': ['checkup', 'component'], '33': ['chisel', 'concurrent'],
'34': ['choking', 'confidence'], '35': ['chopper', 'conformist'],
'36': ['Christmas', 'congregate'], '37': ['clamshell', 'consensus'],
'38': ['classic', 'consulting'], '39': ['classroom', 'corporate'],
'3A': ['cleanup', 'corrosion'], '3B': ['clockwork', 'councilman'],
'3C': ['cobra', 'crossover'], '3D': ['commence', 'crucifix'],
'3E': ['concert', 'cumbersome'], '3F': ['cowbell', 'customer'],
'40': ['crackdown', 'Dakota'], '41': ['cranky', 'decadence'],
'42': ['crowfoot', 'December'], '43': ['crucial', 'decimal'],
'44': ['crumpled', 'designing'], '45': ['crusade', 'detector'],
'46': ['cubic', 'detergent'], '47': ['dashboard', 'determine'],
'48': ['deadbolt', 'dictator'], '49': ['deckhand', 'dinosaur'],
'4A': ['dogsled', 'direction'], '4B': ['dragnet', 'disable'],
'4C': ['drainage', 'disbelief'], '4D': ['dreadful', 'disruptive'],
'4E': ['drifter', 'distortion'], '4F': ['dropper', 'document'],
'50': ['drumbeat', 'embezzle'], '51': ['drunken', 'enchanting'],
'52': ['Dupont', 'enrollment'], '53': ['dwelling', 'enterprise'],
'54': ['eating', 'equation'], '55': ['edict', 'equipment'],
'56': ['egghead', 'escapade'], '57': ['eightball', 'Eskimo'],
'58': ['endorse', 'everyday'], '59': ['endow', 'examine'],
'5A': ['enlist', 'existence'], '5B': ['erase', 'exodus'],
'5C': ['escape', 'fascinate'], '5D': ['exceed', 'filament'],
'5E': ['eyeglass', 'finicky'], '5F': ['eyetooth', 'forever'],
'60': ['facial', 'fortitude'], '61': ['fallout', 'frequency'],
'62': ['flagpole', 'gadgetry'], '63': ['flatfoot', 'Galveston'],
'64': ['flytrap', 'getaway'], '65': ['fracture', 'glossary'],
'66': ['framework', 'gossamer'], '67': ['freedom', 'graduate'],
'68': ['frighten', 'gravity'], '69': ['gazelle', 'guitarist'],
'6A': ['Geiger', 'hamburger'], '6B': ['glitter', 'Hamilton'],
'6C': ['glucose', 'handiwork'], '6D': ['goggles', 'hazardous'],
'6E': ['goldfish', 'headwaters'], '6F': ['gremlin', 'hemisphere'],
'70': ['guidance', 'hesitate'], '71': ['hamlet', 'hideaway'],
'72': ['highchair', 'holiness'], '73': ['hockey', 'hurricane'],
'74': ['indoors', 'hydraulic'], '75': ['indulge', 'impartial'],
'76': ['inverse', 'impetus'], '77': ['involve', 'inception'],
'78': ['island', 'indigo'], '79': ['jawbone', 'inertia'],
'7A': ['keyboard', 'infancy'], '7B': ['kickoff', 'inferno'],
'7C': ['kiwi', 'informant'], '7D': ['klaxon', 'insincere'],
'7E': ['locale', 'insurgent'], '7F': ['lockup', 'integrate'],
'80': ['merit', 'intention'], '81': ['minnow', 'inventive'],
'82': ['miser', 'Istanbul'], '83': ['Mohawk', 'Jamaica'],
'84': ['mural', 'Jupiter'], '85': ['music', 'leprosy'],
'86': ['necklace', 'letterhead'], '87': ['Neptune', 'liberty'],
'88': ['newborn', 'maritime'], '89': ['nightbird', 'matchmaker'],
'8A': ['Oakland', 'maverick'], '8B': ['obtuse', 'Medusa'],
'8C': ['offload', 'megaton'], '8D': ['optic', 'microscope'],
'8E': ['orca', 'microwave'], '8F': ['payday', 'midsummer'],
'90': ['peachy', 'millionaire'], '91': ['pheasant', 'miracle'],
'92': ['physique', 'misnomer'], '93': ['playhouse', 'molasses'],
'94': ['Pluto', 'molecule'], '95': ['preclude', 'Montana'],
'96': ['prefer', 'monument'], '97': ['preshrunk', 'mosquito'],
'98': ['printer', 'narrative'], '99': ['prowler', 'nebula'],
'9A': ['pupil', 'newsletter'], '9B': ['puppy', 'Norwegian'],
'9C': ['python', 'October'], '9D': ['quadrant', 'Ohio'],
'9E': ['quiver', 'onlooker'], '9F': ['quota', 'opulent'],
'A0': ['ragtime', 'Orlando'], 'A1': ['ratchet', 'outfielder'],
'A2': ['rebirth', 'Pacific'], 'A3': ['reform', 'pandemic'],
'A4': ['regain', 'Pandora'], 'A5': ['reindeer', 'paperweight'],
'A6': ['rematch', 'paragon'], 'A7': ['repay', 'paragraph'],
'A8': ['retouch', 'paramount'], 'A9': ['revenge', 'passenger'],
'AA': ['reward', 'pedigree'], 'AB': ['rhythm', 'Pegasus'],
'AC': ['ribcage', 'penetrate'], 'AD': ['ringbolt', 'perceptive'],
'AE': ['robust', 'performance'], 'AF': ['rocker', 'pharmacy'],
'B0': ['ruffled', 'phonetic'], 'B1': ['sailboat', 'photograph'],
'B2': ['sawdust', 'pioneer'], 'B3': ['scallion', 'pocketful'],
'B4': ['scenic', 'politeness'], 'B5': ['scorecard', 'positive'],
'B6': ['Scotland', 'potato'], 'B7': ['seabird', 'processor'],
'B8': ['select', 'provincial'], 'B9': ['sentence', 'proximate'],
'BA': ['shadow', 'puberty'], 'BB': ['shamrock', 'publisher'],
'BC': ['showgirl', 'pyramid'], 'BD': ['skullcap', 'quantity'],
'BE': ['skydive', 'racketeer'], 'BF': ['slingshot', 'rebellion'],
'C0': ['slowdown', 'recipe'], 'C1': ['snapline', 'recover'],
'C2': ['snapshot', 'repellent'], 'C3': ['snowcap', 'replica'],
'C4': ['snowslide', 'reproduce'], 'C5': ['solo', 'resistor'],
'C6': ['southward', 'responsive'], 'C7': ['soybean', 'retraction'],
'C8': ['spaniel', 'retrieval'], 'C9': ['spearhead', 'retrospect'],
'CA': ['spellbind', 'revenue'], 'CB': ['spheroid', 'revival'],
'CC': ['spigot', 'revolver'], 'CD': ['spindle', 'sandalwood'],
'CE': ['spyglass', 'sardonic'], 'CF': ['stagehand', 'Saturday'],
'D0': ['stagnate', 'savagery'], 'D1': ['stairway', 'scavenger'],
'D2': ['standard', 'sensation'], 'D3': ['stapler', 'sociable'],
'D4': ['steamship', 'souvenir'], 'D5': ['sterling', 'specialist'],
'D6': ['stockman', 'speculate'], 'D7': ['stopwatch', 'stethoscope'],
'D8': ['stormy', 'stupendous'], 'D9': ['sugar', 'supportive'],
'DA': ['surmount', 'surrender'], 'DB': ['suspense', 'suspicious'],
'DC': ['sweatband', 'sympathy'], 'DD': ['swelter', 'tambourine'],
'DE': ['tactics', 'telephone'], 'DF': ['talon', 'therapist'],
'E0': ['tapeworm', 'tobacco'], 'E1': ['tempest', 'tolerance'],
'E2': ['tiger', 'tomorrow'], 'E3': ['tissue', 'torpedo'],
'E4': ['tonic', 'tradition'], 'E5': ['topmost', 'travesty'],
'E6': ['tracker', 'trombonist'], 'E7': ['transit', 'truncated'],
'E8': ['trauma', 'typewriter'], 'E9': ['treadmill', 'ultimate'],
'EA': ['Trojan', 'undaunted'], 'EB': ['trouble', 'underfoot'],
'EC': ['tumor', 'unicorn'], 'ED': ['tunnel', 'unify'],
'EE': ['tycoon', 'universe'], 'EF': ['uncut', 'unravel'],
'F0': ['unearth', 'upcoming'], 'F1': ['unwind', 'vacancy'],
'F2': ['uproot', 'vagabond'], 'F3': ['upset', 'vertigo'],
'F4': ['upshot', 'Virginia'], 'F5': ['vapor', 'visitor'],
'F6': ['village', 'vocalist'], 'F7': ['virus', 'voyager'],
'F8': ['Vulcan', 'warranty'], 'F9': ['waffle', 'Waterloo'],
'FA': ['wallet', 'whimsical'], 'FB': ['watchword', 'Wichita'],
'FC': ['wayside', 'Wilmington'], 'FD': ['willow', 'Wyoming'],
'FE': ['woodlark', 'yesteryear'], 'FF': ['Zulu', 'Yucatan']
}
byte_to_even_word = dict([(unhexlify(k), both_words[0])
for k,both_words
in raw_words.items()])
byte_to_odd_word = dict([(unhexlify(k), both_words[1])
for k,both_words
in raw_words.items()])
even_words_lowercase, odd_words_lowercase = set(), set()
even_words_lowercase_to_byte, odd_words_lowercase_to_byte = dict(), dict()
for k,both_words in raw_words.items():
even_word, odd_word = both_words
even_words_lowercase.add(even_word.lower())
even_words_lowercase_to_byte[even_word.lower()] = unhexlify(k)
odd_words_lowercase.add(odd_word.lower())
odd_words_lowercase_to_byte[odd_word.lower()] = unhexlify(k)
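# Illustrative sketch, not part of the original module: encoding a byte string
# into PGP words, alternating the even and odd lists as described above so
# that a dropped or duplicated word is detectable.
def _example_encode(data):
    words = []
    for i, b in enumerate(data):
        byte = bytes([b]) if isinstance(b, int) else b  # bytes iteration differs on py2/py3
        table = byte_to_even_word if i % 2 == 0 else byte_to_odd_word
        words.append(table[byte])
    return ' '.join(words)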
| mit |
jamesbdunlop/Nucleolus | nebula_ui/nMayaui_core/colorSelector.py | 2 | 4394 | import logging, sys
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from PySide2.QtCore import Signal
from functools import partial
logging.basicConfig()
logger = logging.getLogger(__name__)
class ColorSelector(QWidget):
mayaColorIndex = Signal(int)
def __init__(self, parent=None, button=None, colCount=5):
"""
Builds a gridLayout for the 31 color indexes for the maya overrides colors.
Changing colCount will change the total number of columns for the QGridLayout the pushButtons get built into.
@param button: The QPushButton used to open the grid. This is used to change the color of the button to the color selected from the grid.
@param colCount: The number of columns to build to.
@type button: QPushButton
@type colCount: Int
"""
QWidget.__init__(self, parent=parent)
logger.info('Building ColorSelector win...')
self.setWindowTitle('overrideColors')
self.menuLayout = QGridLayout(self)
self.setWindowFlags(Qt.WindowStaysOnTopHint)
self.colCount = colCount
self.button = button
self.mayaColor = 15
self.rgbColor = [0, 0, 0]
self.qtColor = None
self.r = 0
self.c = 1
self.colorsDict = {0: [0.0, 0.0, 0.0],
1: [0.0, 0.0, 0.0],
2: [.75, .75, .75],
3: [0.5, 0.5, 0.5],
4: [0.8, 0.0, 0.2],
5: [0.0, 0.0, 0.4],
6: [0.0, 0.0, 1.0],
7: [0.0, 0.3, 0.0],
8: [0.2, 0, 0.3],
9: [.8, 0, .8 ],
10: [0.6, 0.3, 0.2],
11: [0.25, 0.13, 0.13],
12: [0.7, .2, 0],
13: [1.0, 0, 0],
14: [0, 1.0, 0],
15: [0, 0.3, 0.6],
16: [1, 1, 1],
17: [1, 1, 0],
18: [0, 1, 1],
19: [0, 1, .8],
20: [1, .7, .7],
21: [0.9, .7, .5],
22: [1, 1, 0.4],
23: [0, 0.7, .4],
24: [.6, .4, .2],
25: [.63, .63, .17],
26: [0.4, 0.6, 0.2],
27: [0.2, 0.63, 0.35],
28: [0.18, 0.63, 0.63],
29: [0.18, 0.4, 0.63],
30: [0.43, 0.18, 0.63],
31: [0.63, 0.18, 0.4]
}
for key, var in self.colorsDict.items():
if self.c == self.colCount:
self.r = self.r + 1
self.c = 1
## Build color buttons into the grid layout
self.myColor = QColor()
self.myColor.setRgbF(var[0],var[1], var[2])
self.colorButton = QPushButton(self)
self.colorButton.setFixedWidth(25)
self.colorButton.setObjectName('{}'.format(key))
self.colorButton.setText('{}'.format(key))
self.colorButton.released.connect(partial(self._releasedButton, self.colorButton, self.myColor))
self.colorButton.setStyleSheet("QPushButton{background: %s; height:15px}" % self.myColor.name())
self.menuLayout.addWidget(self.colorButton, self.r, self.c)
self.c = self.c + 1
logger.info('Built ColorSelector win successfully...')
def _releasedButton(self, colorButton, myColor):
if self.button is not None:
self.button.setStyleSheet("QPushButton{background:%s; height:15px}" % myColor.name())
self.rgbColor = myColor.getRgb()
self.qtColor = myColor.name()
self.mayaColor = int(colorButton.objectName())
self.mayaColorIndex.emit(self.mayaColor)
self.close()
@property
def color(self):
return self.mayaColor
@color.setter
def color(self, color):
self.mayaColor = color
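# Usage sketch (hypothetical caller, not part of this module): react to the
# selected Maya color index via the mayaColorIndex signal.
#
# selector = ColorSelector(button=someExistingQPushButton)
# selector.mayaColorIndex.connect(lambda idx: logger.info('picked index %s', idx))
# selector.show()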
if __name__ == '__main__':
qtapp = QApplication(sys.argv)
myUI = ColorSelector()
myUI.show()
qtapp.exec_()
| apache-2.0 |
xigt/freki | freki/text2freki.py | 1 | 6059 | from freki.serialize import FrekiDoc, FrekiBlock, FrekiLine
import codecs
import re
import chardet
import logging
import argparse
def run(args):
frek = read_and_convert(args.infile, args.igtfile, args.encoding, args.detect)
with open(args.outfile, 'w', encoding='utf8') as out:
    out.write(str(frek))
def convert_text(doc_id, text, span_text=None):
"""
Convert a string to freki
:param doc_id: name of document
:param text: text of document
:param span_text: text identifying IGT spans, if available
:return: freki object
"""
w_index = 1
wo_index = 1
pre2post = {}
for line in re.split('\r\n|\n', text):
if not re.match(r'^\s*$', line):
pre2post[w_index] = wo_index
wo_index += 1
w_index += 1
line_dict = {}
s_index = 0
if span_text:
for line in span_text.split('\n'):
if len(line):
parts = line.split()
tags = parts[2:]
start = int(parts[0])
for i in range(start, int(parts[1]) + 1):
try:
num = pre2post[i]
except KeyError:
print("Warning: a line specified in the igt file is a blank line in the document. "
"Check the line numbers in the igt file. Skipping the problem line.")
break
line_dict[num] = (tags[num - start], 's' + str(s_index))
s_index += 1
frek = FrekiDoc()
text = re.sub(r'(\r\n|\n){2,}', '\n\n', text)
blocks = re.split('\n\n', text)
index = 1
b_index = 1
for para in blocks:
lines = re.split('\r\n|\n', para)
linenos = []
for line in lines:
f_line = FrekiLine(line)
f_line.attrs['line'] = index
linenos.append(index)
if index in line_dict:
f_line.attrs['tag'] = line_dict[index][0]
f_line.attrs['span_id'] = line_dict[index][1]
frek.add_line(f_line)
index += 1
block = FrekiBlock(linenos, linenos[0], linenos[-1], frek)
block._attrs['page'] = '1'
block._attrs['block_id'] = 'b' + str(b_index)
block._attrs['doc_id'] = doc_id
b_index += 1
frek.add_block(block)
return frek
def read_and_convert(path, igt_path=None, encoding='utf-8', detect_encoding=False):
"""
Read in a text file and convert it to freki. igt_path file format: startline endline tag1 tag2 ... tagN\n
:param path: path to the text file
:param igt_path: path to the text file containing IGT span info
:param encoding: name of the encoding of the file
:param detect_encoding: setting to true will first detect an encoding rather than using the default.
:return: freki object
"""
name = path.split('/')[-1].split('.')[0]
igt_text = None
if detect_encoding:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('Using encoding: ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet')
else:
try:
text = codecs.open(path, encoding=encoding, errors='strict').read()
if igt_path:
igt_text = codecs.open(igt_path, encoding=encoding).read()
except UnicodeDecodeError:
bytes = open(path, 'rb').read()
p_predict = chardet.detect(bytes)
text = codecs.open(path, encoding=p_predict['encoding'], errors='strict').read()
if igt_path:
i_predict = chardet.detect(open(igt_path, 'rb').read())
igt_text = codecs.open(igt_path, encoding=i_predict['encoding']).read()
logging.info('The file cannot be read using encoding ' + encoding + '. Instead using ' + p_predict['encoding'])
logging.info('Encoding detection uses the Chardet library: https://pypi.python.org/pypi/chardet\n')
logging.info("If encoding " + p_predict['encoding'] + ' is not correct please specify the encoding as an argument')
logging.info('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
except LookupError:
print('Unknown encoding. If you want the system to automatically detect an encoding set detect_encoding=True')
print('For a detailed list of encodings available in Python visit https://docs.python.org/2.4/lib/standard-encodings.html')
raise
frek = convert_text(name, text, igt_text)
return frek
def main(arglist=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Convert a plain text file to Freki format",
prog='text-to-freki',
epilog='examples:\n'
' text-to-freki in.txt out.freki --igtfile=igts.txt --detect-encoding'
)
parser.add_argument('infile', help='plain text file')
parser.add_argument('outfile', help='path to freki output file')
parser.add_argument('--igtfile', help='plain text file containing igt span info')
parser.add_argument('--encoding', default='utf-8', help='encoding of the input file')
parser.add_argument(
'-d', '--detect-encoding', dest='detect', action='store_true', help='automatically detect the encoding of the input file'
)
parser.add_argument(
'-v', '--verbose',
action='count', dest='verbosity', default=2,
help='increase the verbosity (can be repeated: -vvv)'
)
args = parser.parse_args(arglist)
logging.basicConfig(level=50-(args.verbosity*10))
run(args)
if __name__ == '__main__':
main() | mit |
matthiasdiener/spack | lib/spack/external/py/_code/code.py | 23 | 27441 | import py
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
if sys.version_info[0] >= 3:
from traceback import format_exception_only
else:
from py._code._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = py.code.getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" %(rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a py.code.Source object for the full source file of the code
"""
from py._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a py.code.Source object for the code object's source only
"""
# return source only for that part of code
return py.code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
# handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = py.code.Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
if self.code.fullsource is None:
return py.code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
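# Usage sketch (illustrative; assumes some name x is bound in the frame):
# result = frame.eval("x + y", y=2) # frame globals/locals plus y=2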
def exec_(self, code, **vars):
""" exec 'code' in the frame
'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals)
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry):
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
return py.code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
@property
def statement(self):
""" py.code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
locals = property(getlocals, None, None, "locals of underlying frame")
def reinterpret(self):
"""Reinterpret the failing statement and returns a detailed information
about what operations are performed."""
if self.exprinfo is None:
source = str(self.statement).strip()
x = py.code._reinterpret(source, self.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
self.exprinfo = x
return self.exprinfo
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from py._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
mostly for internal use
"""
try:
return self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
return self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except:
line = "???"
return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
def name(self):
return self.frame.code.raw.co_name
name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb):
""" initialize from given python traceback object. """
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
by providing any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackItem
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackItems which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self))
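# Usage sketch (illustrative): keep only entries outside installed packages.
# tb = tb.filter(lambda entry: 'site-packages' not in str(entry.path))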
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self)-1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackItem where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
#XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
#print "checking for recursion at", key
l = cache.setdefault(key, [])
if l:
f = entry.frame
loc = f.f_locals
for otherloc in l:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
l.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
def __init__(self, tup=None, exprinfo=None):
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = str(tup[1])
if exprinfo and exprinfo.startswith('assert '):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (py.code.Traceback instance)
self.traceback = py.code.Traceback(self.tb)
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
py.code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno+1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals are ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
py.std.traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return loc.__unicode__()
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source)-1))
except KeyboardInterrupt:
raise
except:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
lines = []
if source is None or line_index >= len(source.lines):
source = py.code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index+1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
#if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" %(name, str_repr))
#else:
# self._line("%-10s =\\" % (name,))
# # XXX
# py.std.pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
source = self._getentrysource(entry)
if source is None:
source = py.code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" %(entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
recursionindex = None
if excinfo.errisinstance(RuntimeError):
if "maximum recursion depth exceeded" in str(excinfo.value):
recursionindex = traceback.recursionindex()
last = traceback[-1]
entries = []
extraline = None
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
if index == recursionindex:
extraline = "!!! Recursion detected (same locals & position)"
break
return ReprTraceback(entries, extraline, style=self.style)
def repr_excinfo(self, excinfo):
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
class TerminalRepr:
def __str__(self):
s = self.__unicode__()
if sys.version_info[0] < 3:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" %(self.__class__, id(self))
class ReprExceptionInfo(TerminalRepr):
def __init__(self, reprtraceback, reprcrash):
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
last_style = None
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i+1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
#tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
#tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
# using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" %(name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
oldbuiltins = {}
def patch_builtins(assertion=True, compile=True):
""" put compile and AssertionError builtins to Python's builtins. """
if assertion:
from py._code import assertion
l = oldbuiltins.setdefault('AssertionError', [])
l.append(py.builtin.builtins.AssertionError)
py.builtin.builtins.AssertionError = assertion.AssertionError
if compile:
l = oldbuiltins.setdefault('compile', [])
l.append(py.builtin.builtins.compile)
py.builtin.builtins.compile = py.code.compile
def unpatch_builtins(assertion=True, compile=True):
""" remove compile and AssertionError builtins from Python builtins. """
if assertion:
py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
if compile:
py.builtin.builtins.compile = oldbuiltins['compile'].pop()
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj
| lgpl-2.1 |
litecoinxt/litecoinxt | qa/rpc-tests/txn_doublespend.py | 152 | 4968 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].move("", "foo", 1220)
self.nodes[0].move("", "bar", 30)
assert_equal(self.nodes[0].getbalance(""), 0)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# First: use raw transaction API to send 1210 BTC to node1_address,
# but don't broadcast:
(total_in, inputs) = gather_inputs(self.nodes[0], 1210)
change_address = self.nodes[0].getnewaddress("foo")
outputs = {}
outputs[change_address] = 40
outputs[node1_address] = 1210
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two transactions from node[0] to node[1]; the
# second must spend change from the first because the first
# spends all mature inputs:
txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].setgenerate(True, 1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 1210, minus 20, and minus transaction fees:
expected = starting_balance
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend to miner:
mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].setgenerate(True, 1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].setgenerate(True, 1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -1)
assert_equal(tx2["confirmations"], -1)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1210 for the double-spend:
expected = starting_balance + 100 - 1210
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
# foo account should be debited, but bar account should not:
assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
assert_equal(self.nodes[0].getbalance("bar"), 30)
# Node1's "from" account balance should be just the mutated send:
assert_equal(self.nodes[1].getbalance("from0"), 1210)
if __name__ == '__main__':
TxnMallTest().main()
| mit |
rldhont/Quantum-GIS | tests/src/python/test_qgssymbollayer_readsld.py | 30 | 17835 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssymbollayer_readsld.py
---------------------
Date : January 2017
Copyright : (C) 2017, Jorge Gustavo Rocha
Email : jgr at di dot uminho dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Jorge Gustavo Rocha'
__date__ = 'January 2017'
__copyright__ = '(C) 2017, Jorge Gustavo Rocha'
import qgis # NOQA
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsUnitTypes,
QgsPointXY,
QgsSvgMarkerSymbolLayer,
QgsEllipseSymbolLayer,
QgsSimpleFillSymbolLayer,
QgsSVGFillSymbolLayer,
QgsLinePatternFillSymbolLayer,
QgsSimpleLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsSimpleMarkerSymbolLayer,
QgsFontMarkerSymbolLayer
)
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
def createLayerWithOneLine():
# create a temporary layer
# linelayer = iface.addVectorLayer("LineString?crs=epsg:4326&field=gid:int&field=name:string", "simple_line", "memory")
linelayer = QgsVectorLayer("LineString?crs=epsg:4326&field=gid:int&field=name:string", "simple_line", "memory")
one = QgsFeature(linelayer.dataProvider().fields(), 0)
one.setAttributes([1, 'one'])
one.setGeometry(QgsGeometry.fromPolylineXY([QgsPointXY(-7, 38), QgsPointXY(-8, 42)]))
linelayer.dataProvider().addFeatures([one])
return linelayer
def createLayerWithOnePoint():
layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
"addfeat", "memory")
pr = layer.dataProvider()
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
assert pr.addFeatures([f])
assert layer.featureCount() == 1
return layer
def createLayerWithOnePolygon():
layer = QgsVectorLayer("Polygon?crs=epsg:3111&field=pk:int", "vl", "memory")
assert layer.isValid()
f1 = QgsFeature(layer.dataProvider().fields(), 1)
f1.setAttribute("pk", 1)
f1.setGeometry(QgsGeometry.fromPolygonXY([[QgsPointXY(2484588, 2425722), QgsPointXY(2482767, 2398853),
QgsPointXY(2520109, 2397715), QgsPointXY(2520792, 2425494),
QgsPointXY(2484588, 2425722)]]))
assert layer.dataProvider().addFeatures([f1])
return layer
class TestQgsSymbolLayerReadSld(unittest.TestCase):
"""
This class checks if SLD styles are properly applied
"""
def setUp(self):
self.iface = get_iface()
# test <CSSParameter>VALUE<CSSParameter/>
# test <CSSParameter><ogc:Literal>VALUE<ogc:Literal/><CSSParameter/>
def test_Literal_within_CSSParameter(self):
layer = createLayerWithOneLine()
mFilePath = os.path.join(TEST_DATA_DIR, 'symbol_layer/external_sld/simple_streams.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
def testLineColor():
# stroke CSSParameter within ogc:Literal
# expected color is #003EBA, RGB 0,62,186
self.assertEqual(layer.renderer().symbol().symbolLayers()[0].color().name(), '#003eba')
def testLineWidth():
# stroke-width CSSParameter within ogc:Literal
self.assertEqual(props['line_width'], '2')
def testLineOpacity():
# stroke-opacity CSSParameter NOT within ogc:Literal
# stroke-opacity=0.1
self.assertEqual(props['line_color'], '0,62,186,25')
testLineColor()
testLineWidth()
testLineOpacity()
def testSimpleMarkerRotation(self):
"""
Test if pointMarker property sld:Rotation value can be read if format is:
<sld:Rotation>50.0</sld:Rotation>
or
<se:Rotation><ogc:Literal>50</ogc:Literal></se:Rotation>
"""
# technically it's not necessary to use a real shape; an empty memory
# layer is enough, in case these tests are later upgraded to compare
# rendered output and not only properties
# myShpFile = os.path.join(unitTestDataPath(), 'points.shp')
# layer = QgsVectorLayer(myShpFile, 'points', 'ogr')
layer = QgsVectorLayer("Point", "addfeat", "memory")
assert (layer.isValid())
# test if able to read <sld:Rotation>50.0</sld:Rotation>
mFilePath = os.path.join(unitTestDataPath(),
'symbol_layer/external_sld/testSimpleMarkerRotation-directValue.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
self.assertEqual(props['angle'], '50')
# test if able to read <se:Rotation><ogc:Literal>50</ogc:Literal></se:Rotation>
mFilePath = os.path.join(unitTestDataPath(),
'symbol_layer/external_sld/testSimpleMarkerRotation-ogcLiteral.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
self.assertEqual(props['angle'], '50')
def testSymbolSizeUom(self):
# create a layer
layer = createLayerWithOnePoint()
# load a sld with marker size without uom attribute (pixels)
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# load a sld with marker size with uom attribute in pixel
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomPixel.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# load a sld with marker size with uom attribute in meter
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomMetre.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12 / (0.28 * 0.001)
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertAlmostEqual(size, sld_size_px, delta=0.1)
# load a sld with marker size with uom attribute in foot
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomFoot.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12 * (304.8 / 0.28)
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertAlmostEqual(size, sld_size_px, delta=0.1)
def testSymbolSize(self):
# create the layers
layer = createLayerWithOnePoint()
player = createLayerWithOnePolygon()
# size test for QgsEllipseSymbolLayer
sld = 'symbol_layer/QgsEllipseSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 7
sld_stroke_width_px = 1
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.symbolWidth()
stroke_width = sl.strokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsEllipseSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsVectorFieldSymbolLayer
# createFromSld not implemented
# size test for QgsSimpleFillSymbolLayer
sld = 'symbol_layer/QgsSimpleFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_stroke_width_px = 0.26
sl = player.renderer().symbol().symbolLayers()[0]
stroke_width = sl.strokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsSVGFillSymbolLayer
sld = 'symbol_layer/QgsSVGFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_size_px = 6
sld_stroke_width_px = 3
sl = player.renderer().symbol().symbolLayers()[0]
size = sl.patternWidth()
stroke_width = sl.svgStrokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSVGFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsSvgMarkerSymbolLayer
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSvgMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# size test for QgsPointPatternFillSymbolLayer
# createFromSld not implemented
# size test for QgsLinePatternFillSymbolLayer
sld = 'symbol_layer/QgsLinePatternFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_size_px = 4
sld_stroke_width_px = 1.5
sl = player.renderer().symbol().symbolLayers()[0]
size = sl.distance()
stroke_width = sl.lineWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsLinePatternFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# test size for QgsSimpleLineSymbolLayer
sld = 'symbol_layer/QgsSimpleLineSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_stroke_width_px = 1.26
sl = player.renderer().symbol().symbolLayers()[0]
stroke_width = sl.width()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleLineSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(stroke_width, sld_stroke_width_px)
# test size for QgsMarkerLineSymbolLayer
sld = 'symbol_layer/QgsMarkerLineSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_interval_px = 3.3
sld_offset_px = 6.6
sl = player.renderer().symbol().symbolLayers()[0]
interval = sl.interval()
offset = sl.offset()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsMarkerLineSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(interval, sld_interval_px)
self.assertEqual(offset, sld_offset_px)
# test size for QgsSimpleMarkerSymbolLayer
sld = 'symbol_layer/QgsSimpleMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 6
sld_displacement_x_px = 3.3
sld_displacement_y_px = 6.6
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
offset = sl.offset()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(offset.x(), sld_displacement_x_px)
self.assertEqual(offset.y(), sld_displacement_y_px)
# test size for QgsSVGMarkerSymbolLayer
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
self.assertTrue(isinstance(sl, QgsSvgMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# test size for QgsFontMarkerSymbolLayer
sld = 'symbol_layer/QgsFontMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 6.23
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
self.assertTrue(isinstance(sl, QgsFontMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
def testSymbolSizeAfterReload(self):
# create a layer
layer = createLayerWithOnePoint()
# load a sld with marker size
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
# get the size and unit of the symbol
sl = layer.renderer().symbol().symbolLayers()[0]
first_size = sl.size()
first_unit = sl.outputUnit() # in pixels
# export sld into a qdomdocument with namespace processing activated
doc = QDomDocument()
msg = ""
layer.exportSldStyle(doc, msg)
doc.setContent(doc.toString(), True)
self.assertTrue(msg == "")
# reload the same sld
root = doc.firstChildElement("StyledLayerDescriptor")
el = root.firstChildElement("NamedLayer")
layer.readSld(el, msg)
# extract the size and unit of symbol
sl = layer.renderer().symbol().symbolLayers()[0]
second_size = sl.size()
second_unit = sl.outputUnit()
# size and unit should be the same after export and reload the same
# sld description
self.assertEqual(first_size, second_size)
self.assertEqual(first_unit, second_unit)
def test_Literal_within_CSSParameter_and_Text(self):
layer = createLayerWithOneLine()
mFilePath = os.path.join(TEST_DATA_DIR, 'symbol_layer/external_sld/simple_line_with_text.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
def testLineColor():
# stroke SvgParameter within ogc:Literal
# expected color is #003EBA, RGB 0,62,186
self.assertEqual(layer.renderer().symbol().symbolLayers()[0].color().name(), '#003eba')
def testLineWidth():
# stroke-width SvgParameter within ogc:Literal
self.assertEqual(props['line_width'], '2')
def testLineOpacity():
# stroke-opacity SvgParameter NOT within ogc:Literal
# stroke-opacity=0.1
self.assertEqual(props['line_color'], '0,62,186,24')
testLineColor()
testLineWidth()
testLineOpacity()
from qgis.core import QgsPalLayerSettings
self.assertTrue(layer.labelsEnabled())
self.assertEqual(layer.labeling().type(), 'simple')
settings = layer.labeling().settings()
self.assertEqual(settings.fieldName, 'name')
format = settings.format()
self.assertEqual(format.color().name(), '#ff0000')
font = format.font()
self.assertEqual(font.family(), 'QGIS Vera Sans')
self.assertTrue(font.bold())
self.assertFalse(font.italic())
self.assertEqual(format.size(), 18)
self.assertEqual(format.sizeUnit(), QgsUnitTypes.RenderPixels)
# the layer contains lines
# from qgis.core import QgsWkbTypes
# self.assertEqual(layer.geometryType(), QgsWkbTypes.LineGeometry)
# ideally the placement would be QgsPalLayerSettings.Line for a line layer, but the SLD import currently yields AroundPoint
self.assertEqual(settings.placement, QgsPalLayerSettings.AroundPoint)
self.assertEqual(settings.xOffset, 1)
self.assertEqual(settings.yOffset, 0)
self.assertEqual(settings.offsetUnits, QgsUnitTypes.RenderPixels)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
guorendong/iridium-browser-ubuntu | third_party/chromite/scripts/cros_generate_local_binhosts.py | 2 | 2585 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script for calculating compatible binhosts.
Generates a file that sets the specified board's binhosts to include all of the
other compatible boards in this buildroot.
"""
from __future__ import print_function
import collections
import glob
import optparse
import os
import sys
from chromite.lib import cros_build_lib
def FindCandidateBoards():
"""Find candidate local boards to grab prebuilts from."""
portageq_prefix = "/usr/local/bin/portageq-"
for path in sorted(glob.glob("%s*" % portageq_prefix)):
# Strip off the portageq prefix, leaving only the board.
yield path.replace(portageq_prefix, "")
def SummarizeCompatibility(board):
"""Returns a string that will be the same for compatible boards."""
cmd = ["portageq-%s" % board, "envvar", "ARCH", "CFLAGS"]
return cros_build_lib.RunCommand(cmd, redirect_stdout=True,
print_cmd=False).output.rstrip()
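# Example (hypothetical values): two boards whose portageq wrappers both
# print "arm\n-O2 -pipe" summarize identically and are grouped as compatible.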
def GenerateBinhostLine(build_root, compatible_boards):
"""Generate a binhost line pulling binaries from the specified boards."""
# TODO(davidjames): Prioritize binhosts with more matching use flags.
local_binhosts = " ".join([
"file://localhost" + os.path.join(build_root, x, "packages")
for x in sorted(compatible_boards)])
return "LOCAL_BINHOST='%s'" % local_binhosts
def main(argv):
parser = optparse.OptionParser(usage="USAGE: ./%prog --board=board [options]")
parser.add_option("--build_root", default="/build",
dest="build_root",
help="Location of boards (normally /build)")
parser.add_option("--board", default=None,
dest="board",
help="Board name (required).")
flags, remaining_arguments = parser.parse_args(argv)
if remaining_arguments or not flags.board:
parser.print_help()
sys.exit(1)
by_compatibility = collections.defaultdict(set)
compatible_boards = None
for other_board in FindCandidateBoards():
compat_id = SummarizeCompatibility(other_board)
if other_board == flags.board:
compatible_boards = by_compatibility[compat_id]
else:
by_compatibility[compat_id].add(other_board)
if compatible_boards is None:
print('Missing portageq wrapper for %s' % flags.board, file=sys.stderr)
sys.exit(1)
print('# Generated by cros_generate_local_binhosts.')
print(GenerateBinhostLine(flags.build_root, compatible_boards))
| bsd-3-clause |
deadc0de6/pyircbot | libirc.py | 1 | 18336 | # author: deadc0de6
# contact: https://github.com/deadc0de6
#
# python IRC library
#
# Copyright (C) 2015 deadc0de6
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ref:
# RFC 2812: https://tools.ietf.org/html/rfc2812
# https://www.alien.net.au/irc/irc2numerics.html
#
import socket
import ssl
import sys
import os
import time
import datetime
import string
import random
import threading
import signal
import select
import Queue
class libirc():
BUFLEN = 65535 # buffer to read IRC server message
DEBUG = False
LOGCHAT_NBLINE = 100 # per channel
CONN_TIMEOUT = 5 # second
SELECTTO = 1
ISOPER = False
SSL_CERT_PATH = '/etc/ssl/certs/ca-certificates.crt'
IRC_CMD_QUIT = 'quit'
IRC_CMD_PASS = 'pass'
IRC_CMD_JOIN = 'join' # <channel>
IRC_CMD_USER = 'user' # <username> <hostname> <servername> <realname>
IRC_CMD_NICK = 'nick' # <nickname>
IRC_CMD_PRIVMSG = 'privmsg' # <user> <message>
IRC_CMD_PING = 'ping'
IRC_CMD_PONG = 'pong'
IRC_CMD_LIST = 'list'
IRC_CMD_OPER = 'oper'
IRC_CMD_KICK = 'kick'
IRC_CMD_KILL = 'kill'
IRC_CMD_ERROR = 'error'
IRC_CMD_NAMES = 'names'
IRC_CMD_WHO = 'who'
IRC_CMD_NOTICE = 'notice'
IRC_CMD_CHANNEL = '321'
IRC_CMD_ACHANNEL = '322'
IRC_CMD_CHANNEL_END = '323'
IRC_CMD_MOTD = '375'
IRC_CMD_MTD_END = '376'
IRC_ERR_NICK = '433'
IRC_CMD_CHAN_NAMES = '353'
IRC_CMD_OPER_OK = '381'
# @host: irc server address
# @port: irc server port
# @ssl: use SSL (True) or not (False)
# @nick: the bot nick
# @channel: the bot channel
# @threaded: is it to be threaded
# TODO really need the threaded option ??
def __init__(self, host, port, ssl, nick, channel, threaded=True):
self._server_host = host
self._server_port = port
self._server_ssl = ssl
self._nick = nick
self._orichannel = channel
self._threaded = threaded # calls to callbacks are threaded
# some glob vars
self._pwd = None # server password
self._channels = [] # list of channels
self._conn_channels = [] # list of channels
self._nicks = [] # list of nicks on channels I'm on
self._connected = False
self._stop = False
self._logchat = False
self._logchat_logs = {} # for each channel name a list of exchange
self._sndqueue = Queue.Queue()
# callbacks
self._irc_event_on_message = None
self._irc_event_on_privmessage = None
self._irc_event_on_disconnect = None
self._irc_event_on_connect = None
self._irc_event_on_new_channel = None
self._irc_event_on_servping = None
self._irc_event_on_othermsg = None
self._irc_event_on_conn_channel = None
# dirty hack as signals are not caught by the KeyboardInterrupt
signal.signal(signal.SIGINT, self._signal_handler)
def set_password(self, pwd):
self._pwd = pwd
# enable/disable the log chat features
# logs N lines of each channel it is on
def irc_set_logchat(self, boolean):
self._logchat = boolean
def set_callback(self, on_privmessage = None, on_message = None,\
on_disconnect = None, on_connect = None,\
on_new_channel = None, on_servping = None, on_othermsg = None,\
on_conn_chan = None):
self._irc_event_on_privmessage = on_privmessage
self._irc_event_on_message = on_message
self._irc_event_on_disconnect = on_disconnect
self._irc_event_on_connect = on_connect
self._irc_event_on_new_channel = on_new_channel
self._irc_event_on_servping = on_servping
self._irc_event_on_othermsg = on_othermsg
self._irc_event_on_conn_channel = on_conn_chan
def irc_listchannels(self):
if self._socket == None:
return False
self._irc_send_cmd(self.IRC_CMD_LIST, [])
return True
def irc_join_channel(self, chanstr):
if self._socket == None:
return False
if not chanstr in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_JOIN, [chanstr])
self._conn_channels.append(chanstr)
return True
# send a message to all channel I'm on
def irc_send_to_all_channels(self, msg):
for chan in self._conn_channels:
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
# send a message to a channel
def irc_channel_broadcast(self, chan, msg):
if not self._connected:
return False
if not chan in self._conn_channels:
return False
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [chan], [msg])
return True
# send a private message to a user
def irc_privmsg(self, user, msg):
if not self._connected:
return False
for i in msg.splitlines():
self._irc_send_cmd(self.IRC_CMD_PRIVMSG, [user], [i])
return True
# kill a user
def irc_kill(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self._irc_send_cmd(self.IRC_CMD_KILL, [user], [reason])
return True
# kick user from all channels
def irc_kick_all(self, user, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self.irc_kick(user, '*', reason)
return True
# kick a user from a channel
# syntax: kick <channel> <user> <reason>
def irc_kick(self, user, channel, reason):
if not self._connected:
return False
if not self.ISOPER:
return False
self._irc_send_cmd(self.IRC_CMD_KICK, [channel, user], [reason])
return True
# do we know this user
def irc_known_nick(self, user):
return (user in self._nicks)
# 0 means all lines
def irc_get_chan_logs(self, chan, nbline=0):
nbline = int(nbline)
if not self._logchat:
return None
if not chan in self._logchat_logs:
return None
if nbline == 0 or nbline > len(self._logchat_logs[chan]):
return self._logchat_logs[chan]
else:
tmp = self._logchat_logs[chan][-nbline:]
return tmp
def _signal_handler(self, signum, frame):
self._stop = True
# timestamp to string
def _ts2str(self, ts):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
# print debug message
def _irc_debug(self, txt):
datestr = self._ts2str(time.time())
sys.stderr.write("[IRC] <" + datestr + "> " + txt + "\n")
# print error message
def _irc_err(self, txt):
sys.stderr.write("[IRC ERROR] " + txt)
# this is the main loop reading from the socket
def _irc_read_loop(self):
success = True
self._irc_debug('bot started (read loop started) ....')
self._socket.setblocking(0)
try:
while not self._stop:
try:
wsocks = []
rsocks = [self._socket]
esocks = [self._socket]
if not self._sndqueue.empty():
wsocks.append(self._socket)
iready,oready,eready = select.select(rsocks, wsocks, esocks,
self.SELECTTO)
if self._socket in eready:
self._irc_debug('[ERROR] socket error in select')
if self._socket in oready and not self._sndqueue.empty():
data = self._sndqueue.get()
self._socket.send(data)
if self._socket in iready:
data = self._socket.recv(self.BUFLEN)
for line in data.splitlines():
if not self._irc_handle_line(line):
# not very resilient
success = False
break
if not success:
break
except socket.error as serr:
#pass
# TODO
self._irc_err('socket error')
print serr
except select.error as serr:
#pass
# TODO
self._irc_err('select error')
print serr
except KeyboardInterrupt:
print 'interrupted ...'
self._irc_debug('bot ended (read loop exited)')
return success
# handle anything that appears on the IRC and that I see
def _irc_handle_line(self, line):
if line == None or line == '':
return True
#try:
if self.DEBUG:
self._irc_debug("<- " + line)
# TODO from here
msg = ircmsg(line)
if msg.cmd == self.IRC_CMD_CHANNEL:
# start of channel list
self._irc_debug('start of channel listing')
elif msg.cmd == self.IRC_CMD_CHANNEL_END:
# end of channel list
self._irc_debug('end of channel listing')
elif msg.cmd == self.IRC_CMD_ACHANNEL:
# new channel
self._irc_debug('<- new channel: %s' % (msg.param[1]))
self._channels.append(msg.param[1])
if self._irc_event_on_new_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_new_channel,
args=(msg.param[1],))
t.start()
else:
self._irc_event_on_new_channel(msg.param[1])
elif msg.cmd == self.IRC_CMD_MTD_END:
# end of MOTD
self._irc_debug('joining channel: %s' % (self._orichannel))
self._connected = True
self.irc_join_channel(self._orichannel)
elif msg.cmd == self.IRC_CMD_OPER_OK:
self.ISOPER = True
elif msg.cmd == self.IRC_ERR_NICK:
# nick registration error
self._nick = self._nick + "^"
self._irc_debug('changing nick to: %s' % (self._nick))
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick])
elif msg.cmd == self.IRC_CMD_PING:
# this is a PING from server
self._irc_debug('<- \"ping\"')
self._irc_send_cmd(self.IRC_CMD_PONG, [msg.src])
if self._irc_event_on_servping != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_servping)
t.start()
else:
self._irc_event_on_servping()
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] == self._nick:
# this is a private message sent to myself
self._irc_debug('<- private message to me')
if self._irc_event_on_privmessage != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_privmessage, args=(msg,))
t.start()
else:
self._irc_event_on_privmessage(msg)
elif msg.cmd == self.IRC_CMD_PRIVMSG and msg.param[0] != self._nick:
# this is a message sent to a channel I'm on
self._irc_debug('<- message in channel: %s' % (msg.param[0]))
if self._irc_event_on_message != None:
if self._logchat:
self._irc_log(msg)
if self._threaded:
t = threading.Thread(target=self._irc_event_on_message, args=(msg,))
t.start()
else:
self._irc_event_on_message(msg)
elif msg.cmd == self.IRC_CMD_JOIN:
if msg.user != self._nick:
                # another user joined our channel
self._nicks.append(msg.user)
else:
if msg.msg[0] in self._conn_channels:
if self._irc_event_on_conn_channel != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_conn_channel,
args=(msg.msg[0],))
t.start()
else:
self._irc_event_on_conn_channel(msg.msg[0])
elif msg.cmd == self.IRC_CMD_CHAN_NAMES:
# list of names in the channel
for u in msg.msg:
if not u in self._nicks and u != self._nick:
self._nicks.append(u)
self._irc_debug('nicks list updated: %s' % (','.join(self._nicks)))
elif msg.cmd == self.IRC_CMD_ERROR:
# oops some error
self._irc_debug('ERROR ! (%s)' % (' '.join(msg.msg)))
return False
elif msg.cmd == self.IRC_CMD_NOTICE:
# this is a notice
self._irc_debug('notice received: %s %s' % (msg.param[0],
' '.join(msg.msg)))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
else:
# this is what's left
#self._irc_debug('other message received: %s' % (msg.cmd))
if self._irc_event_on_othermsg != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_othermsg,
args=(msg,))
t.start()
else:
self._irc_event_on_othermsg(msg)
#except Exception as msg:
# print msg
# exc_type, exc_obj, exc_tb = sys.exc_info()
# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
# print exc_type, fname, exc_tb.tb_lineno
# return False
return True
def _irc_log(self, ircmsg):
user = ircmsg.user
chan = ircmsg.param[0]
msg = ' '.join(ircmsg.msg)
if chan in self._logchat_logs:
tmp = self._logchat_logs[chan]
            if len(tmp) > self.LOGCHAT_NBLINE:
                # drop the oldest line to cap the per-channel log size
                del tmp[0]
tmp.append('[%s] %s' % (user, msg))
self._logchat_logs[chan] = tmp
else:
l = ['[%s] %s' % (user, msg)]
self._logchat_logs[chan] = l
    # send an IRC command to the server
    # params are joined with spaces; msg, if non-empty, becomes the trailing
    # ":"-prefixed part of the line and may therefore contain spaces
    # pass log=False to keep sensitive commands (e.g. PASS) out of the debug log
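    # Example (hypothetical values):
    #   self._irc_send_cmd('PRIVMSG', ['#chan'], ['hello', 'world'])
    # queues the raw line "PRIVMSG #chan :hello world"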
    def _irc_send_cmd(self, cmd, params, msg=None, log=True):
        if self._socket is None:
            return False
        data = '%s %s' % (cmd, ' '.join(params))
        if msg:
            data += ' :%s' % (' '.join(msg))
data += '\n'
if log:
self._irc_debug('-> \"%s\"' % (data.rstrip()))
self._sndqueue.put(data)
return True
# connect to IRC server
def irc_connect(self):
sock = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if self._server_ssl:
# https://docs.python.org/2/library/ssl.html
sock = ssl.wrap_socket(s, server_side=False)
else:
sock = s
sock.settimeout(self.CONN_TIMEOUT)
sock.connect((self._server_host, int(self._server_port)))
sock.settimeout(None)
except socket.error as err:
self._irc_err("Connection error: ")
print err
if self._server_ssl:
sock.unwrap()
sock.close()
return False
self._irc_debug("IRC socket connected!")
self._socket = sock
if self._irc_event_on_connect != None:
self._irc_debug('calling connect callback ...')
if self._threaded:
t = threading.Thread(target=self._irc_event_on_connect)
t.start()
else:
self._irc_event_on_connect()
#self._irc_debug('calling read-loop ...')
#t = threading.Thread(target=self._irc_read_loop)
#t.start()
# conn and nick
time.sleep(1)
self._irc_debug('sending nick and other information to the server ...')
self._irc_send_cmd(self.IRC_CMD_PASS, [self._pwd], log=False)
self._irc_send_cmd(self.IRC_CMD_USER, [self._nick, self._nick, self._nick], [self._nick], log=False)
self._irc_send_cmd(self.IRC_CMD_NICK, [self._nick], log=False)
# start the read loop
self._irc_debug('calling read-loop ...')
res = self._irc_read_loop()
try:
if self._server_ssl:
self._socket.unwrap()
except:
pass
self._socket.close()
return res
def irc_gooper(self, username, password):
if username != '' and password != '':
self._irc_debug('going operator ...')
self._irc_send_cmd(self.IRC_CMD_OPER, [username, password], log=False)
    # disconnect from the IRC server and close the socket
def irc_disconnect(self):
# first send the quit command
self._irc_debug('disconnecting ...')
self._stop = True
        if self._socket is not None:
            self._irc_debug("sending the IRC QUIT command")
self._irc_send_cmd(self.IRC_CMD_QUIT, [])
# close the socket
if self._socket != None:
self._socket.close()
self._irc_debug('calling disconnect callback...')
if self._irc_event_on_disconnect != None:
if self._threaded:
t = threading.Thread(target=self._irc_event_on_disconnect)
t.start()
else:
self._irc_event_on_disconnect()
class ircmsg(object):
DEBUG = False
def __init__(self, line):
self._line = line
self.user = '' # the user that sent the message
self.src = '' # the address of the user (username@IP)
self.cmd = '' # the IRC command in the message
self.param = [] # the target of the message (#<channel> or user)
self.msg = [] # the message
self._parse_line()
# this is a naive parser
def _parse_line(self):
# format:
# :<prefix> <command> <params> :<trailing>
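        # Example (hypothetical): ":nick!user@host PRIVMSG #chan :hello there"
        #   -> user='nick', src='user@host', cmd='privmsg',
        #      param=['#chan'], msg=['hello', 'there']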
#try:
# identify the parts
if self.DEBUG:
self._err('full line: \"%s\"' % (self._line))
prefixend = 0
trailingstart = len(self._line)
if self._line.startswith(':'):
# prefix
dummy = self._line[1:]
prefix = dummy.split(' ')[0]
prefixend = len(prefix)+1
if '!' in prefix:
self.user = prefix.split('!')[0].lower()
self.src = prefix.split('!')[1].lower()
else:
self.src = prefix
if self.DEBUG:
self._err('prefix: %s' % (prefix))
if ' :' in self._line:
# trailing
trailing = self._line.split(' :')[1]
trailingstart = self._line.index(' :')
if self.DEBUG:
self._err('trailing: %s' % (trailing))
self.msg = [x.lower() for x in trailing.split(' ')]
cmdparams = self._line[prefixend:trailingstart].strip()
if self.DEBUG:
self._err('cmdparams: %s' % (cmdparams))
self.cmd = cmdparams.split(' ')[0].lower()
self.param = [x.lower() for x in cmdparams.split(' ')[1:]]
#except:
# self._err(self._line)
if self.DEBUG:
self.print_msg()
def _err(self, string):
sys.stderr.write('[IRCMSG] %s\n' % (string))
def print_msg(self):
self._err('Message (%s)' % (self._line))
self._err('\tuser: %s' % (self.user))
self._err('\tsrc: %s' % (self.src))
self._err('\tcmd: %s' % (self.cmd))
self._err('\tparam: %s' % (','.join(self.param)))
self._err('\tmsg: %s' % (','.join(self.msg)))
| gpl-2.0 |
druuu/django | django/db/backends/sqlite3/introspection.py | 204 | 11332 | import re
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
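# Extend the stock FieldInfo tuple with SQLite's reported column default.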
def get_field_size(name):
""" Extract the size number from a "varchar(11)" type name """
m = field_size_re.search(name)
return int(m.group(1)) if m else None
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
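# e.g. FlexibleFieldLookupDict()['varchar(30)'] resolves to
# ('CharField', {'max_length': 30}) even though that exact key is absent.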
class FlexibleFieldLookupDict(object):
# Maps SQL types to Django Field types. Some of the SQL types have multiple
# entries here because SQLite allows for anything and doesn't normalize the
# field type; it uses whatever was given.
base_data_types_reverse = {
'bool': 'BooleanField',
'boolean': 'BooleanField',
'smallint': 'SmallIntegerField',
'smallint unsigned': 'PositiveSmallIntegerField',
'smallinteger': 'SmallIntegerField',
'int': 'IntegerField',
'integer': 'IntegerField',
'bigint': 'BigIntegerField',
'integer unsigned': 'PositiveIntegerField',
'decimal': 'DecimalField',
'real': 'FloatField',
'text': 'TextField',
'char': 'CharField',
'blob': 'BinaryField',
'date': 'DateField',
'datetime': 'DateTimeField',
'time': 'TimeField',
}
def __getitem__(self, key):
key = key.lower()
try:
return self.base_data_types_reverse[key]
except KeyError:
size = get_field_size(key)
if size is not None:
return ('CharField', {'max_length': size})
raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
SELECT name, type FROM sqlite_master
WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
ORDER BY name""")
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
return [
FieldInfo(
info['name'],
info['type'],
None,
info['size'],
None,
None,
info['null_ok'],
info['default'],
) for info in self._table_info(cursor, table_name)
]
def column_name_converter(self, name):
"""
SQLite will in some cases, e.g. when returning columns from views and
subselects, return column names in 'alias."column"' format instead of
simply 'column'.
Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
"""
# TODO: remove when SQLite < 3.7.15 is sufficiently old.
# 3.7.13 ships in Debian stable as of 2014-03-21.
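        # e.g. 'tbl."name"' is converted to 'name' on affected versions.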
if self.connection.Database.sqlite_version_info < (3, 7, 15):
return name.split('.')[-1].strip('"')
else:
return name
def get_relations(self, cursor, table_name):
"""
        Returns a dictionary of {field_name: (other_field_name, other_table)}
        representing all relationships to the given table.
"""
# Dictionary of relations to return
relations = {}
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
try:
results = cursor.fetchone()[0].strip()
except TypeError:
# It might be a view, then no results will be returned
return relations
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
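        # e.g. field_desc = '"author_id" integer REFERENCES "myapp_author" ("id")'
        # (hypothetical column definition)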
for field_desc in results.split(','):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
            m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
if not m:
continue
table, column = [s.strip('"') for s in m.groups()]
if field_desc.startswith("FOREIGN KEY"):
# Find name of the target FK field
                m = re.match(r'FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
field_name = m.groups()[0].strip('"')
else:
field_name = field_desc.split()[0].strip('"')
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
result = cursor.fetchall()[0]
other_table_results = result[0].strip()
li, ri = other_table_results.index('('), other_table_results.rindex(')')
other_table_results = other_table_results[li + 1:ri]
for other_desc in other_table_results.split(','):
other_desc = other_desc.strip()
if other_desc.startswith('UNIQUE'):
continue
other_name = other_desc.split(' ', 1)[0].strip('"')
if other_name == column:
relations[field_name] = (other_name, table)
break
return relations
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
"""
key_columns = []
# Schema for this table
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
# Walk through and look for references to other tables. SQLite doesn't
# really have enforced references, but since it echoes out the SQL used
# to create the table we can look for REFERENCES statements used there.
for field_index, field_desc in enumerate(results.split(',')):
field_desc = field_desc.strip()
if field_desc.startswith("UNIQUE"):
continue
            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
if not m:
continue
# This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
key_columns.append(tuple(s.strip('"') for s in m.groups()))
return key_columns
def get_indexes(self, cursor, table_name):
indexes = {}
for info in self._table_info(cursor, table_name):
if info['pk'] != 0:
indexes[info['name']] = {'primary_key': True,
'unique': False}
cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
# seq, name, unique
for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
info = cursor.fetchall()
# Skip indexes across multiple fields
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
if m:
return m.groups()[0]
return None
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, default_value, pk
return [{
'name': field[1],
'type': field[2],
'size': get_field_size(field[2]),
'null_ok': not field[3],
'default': field[4],
'pk': field[5], # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for row in cursor.fetchall():
# Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
# columns. Discard last 2 columns if there.
number, index, unique = row[:3]
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
| bsd-3-clause |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/error_reporting/util.py | 2 | 2798 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crash Reporting for Cloud SDK."""
from googlecloudsdk.core import apis as core_apis
from googlecloudsdk.core import properties
API_VERSION = 'v1beta1'
API_NAME = 'clouderrorreporting'
class ErrorReporting(object):
"""Report errors to errorreporting."""
def __init__(self, api_client=None):
self.api_client = api_client
if self.api_client is None:
self.api_client = core_apis.GetClientInstance(API_NAME, API_VERSION)
self.api_messages = self.api_client.MESSAGES_MODULE
def ReportEvent(self,
error_message,
service,
version=None,
project=None,
request_url=None,
user=None):
"""Creates a new error event and sends to StackDriver Reporting API.
Args:
error_message: str, Crash details including stacktrace
service: str, Name of service
version: str, Service version, defaults to None
project: str, Project to report errors to, defaults to current
request_url: str, The request url that led to the error
user: str, The user affected by the error
"""
service_context = self.api_messages.ServiceContext(
service=service, version=version)
error_event = self.api_messages.ReportedErrorEvent(
serviceContext=service_context, message=error_message)
if request_url or user:
error_context = self.api_messages.ErrorContext()
if request_url:
error_context.httpRequest = self.api_messages.HttpRequestContext(
url=request_url)
if user:
error_context.user = user
error_event.context = error_context
if project is None:
project = self._GetGcloudProject()
project_name = self._MakeProjectName(project)
self.api_client.projects_events.Report(
self.api_messages.ClouderrorreportingProjectsEventsReportRequest(
projectName=project_name,
reportedErrorEvent=error_event))
def _GetGcloudProject(self):
"""Gets the current project if project is not specified."""
return properties.VALUES.core.project.Get(required=True)
def _MakeProjectName(self, project):
return 'projects/' + project
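# Example usage (hypothetical values):
#   ErrorReporting().ReportEvent('Traceback (most recent call last): ...',
#                                service='gcloud', version='1.2.3',
#                                project='my-project')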
| apache-2.0 |
gclenaghan/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
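# Apply sublinear tf scaling (1 + log(tf)), as in tf-idf weighting.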
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
    # Assert that the first 10 components agree between the 10- and
    # 20-component fits
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
chirilo/mozillians | vendor-local/lib/python/rest_framework/six.py | 33 | 11056 | """Utilities for writing code that runs on Python 2 and 3"""
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform == "java":
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules["django.utils.six.moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
### Additional customizations for Django ###
if PY3:
_iterlists = "lists"
_assertRaisesRegex = "assertRaisesRegex"
else:
_iterlists = "iterlists"
_assertRaisesRegex = "assertRaisesRegexp"
def iterlists(d):
"""Return an iterator over the values of a MultiValueDict."""
return getattr(d, _iterlists)()
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
add_move(MovedModule("_dummy_thread", "dummy_thread"))
add_move(MovedModule("_thread", "thread"))
| bsd-3-clause |
j-carpentier/nova | nova/objects/instance_mapping.py | 29 | 4924 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import base as ovo
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
# NOTE(danms): Maintain Dict compatibility because of ovo bug 1474952
@base.NovaObjectRegistry.register
class InstanceMapping(base.NovaTimestampObject, base.NovaObject,
ovo.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.IntegerField(read_only=True),
'instance_uuid': fields.UUIDField(),
'cell_id': fields.IntegerField(),
'project_id': fields.StringField(),
}
@staticmethod
def _from_db_object(context, instance_mapping, db_instance_mapping):
for key in instance_mapping.fields:
setattr(instance_mapping, key, db_instance_mapping[key])
instance_mapping.obj_reset_changes()
instance_mapping._context = context
return instance_mapping
@staticmethod
def _get_by_instance_uuid_from_db(context, instance_uuid):
session = db_api.get_api_session()
with session.begin():
db_mapping = session.query(
api_models.InstanceMapping).filter_by(
instance_uuid=instance_uuid).first()
if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
return db_mapping
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_mapping = cls._get_by_instance_uuid_from_db(context, instance_uuid)
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
def _create_in_db(context, updates):
session = db_api.get_api_session()
db_mapping = api_models.InstanceMapping()
db_mapping.update(updates)
db_mapping.save(session)
return db_mapping
@base.remotable
def create(self):
db_mapping = self._create_in_db(self._context, self.obj_get_changes())
self._from_db_object(self._context, self, db_mapping)
@staticmethod
def _save_in_db(context, instance_uuid, updates):
session = db_api.get_api_session()
with session.begin():
db_mapping = session.query(
api_models.InstanceMapping).filter_by(
instance_uuid=instance_uuid).first()
if not db_mapping:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
db_mapping.update(updates)
session.add(db_mapping)
return db_mapping
@base.remotable
def save(self):
changes = self.obj_get_changes()
db_mapping = self._save_in_db(self._context, self.instance_uuid,
changes)
self._from_db_object(self._context, self, db_mapping)
self.obj_reset_changes()
@staticmethod
def _destroy_in_db(context, instance_uuid):
session = db_api.get_api_session()
with session.begin():
result = session.query(api_models.InstanceMapping).filter_by(
instance_uuid=instance_uuid).delete()
if not result:
raise exception.InstanceMappingNotFound(uuid=instance_uuid)
@base.remotable
def destroy(self):
self._destroy_in_db(self._context, self.instance_uuid)
@base.NovaObjectRegistry.register
class InstanceMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'objects': fields.ListOfObjectsField('InstanceMapping'),
}
obj_relationships = {
'objects': [('1.0', '1.0')],
}
@staticmethod
def _get_by_project_id_from_db(context, project_id):
session = db_api.get_api_session()
with session.begin():
db_mappings = session.query(api_models.InstanceMapping).filter_by(
project_id=project_id).all()
return db_mappings
@base.remotable_classmethod
def get_by_project_id(cls, context, project_id):
db_mappings = cls._get_by_project_id_from_db(context, project_id)
return base.obj_make_list(context, cls(), objects.InstanceMapping,
db_mappings)
| apache-2.0 |
lochiiconnectivity/boto | boto/core/__init__.py | 68 | 1179 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
OpenAcademy-OpenStack/nova-scheduler | nova/api/openstack/compute/contrib/security_groups.py | 4 | 26752 | # Copyright 2011 OpenStack Foundation
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The security groups extension."""
import contextlib
import json
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import api as compute_api
from nova import exception
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
from nova.openstack.common.gettextutils import _
from nova.openstack.common import xmlutils
from nova.virt import netutils
authorize = extensions.extension_authorizer('compute', 'security_groups')
softauth = extensions.soft_extension_authorizer('compute', 'security_groups')
def make_rule(elem):
elem.set('id')
elem.set('parent_group_id')
proto = xmlutil.SubTemplateElement(elem, 'ip_protocol')
proto.text = 'ip_protocol'
from_port = xmlutil.SubTemplateElement(elem, 'from_port')
from_port.text = 'from_port'
to_port = xmlutil.SubTemplateElement(elem, 'to_port')
to_port.text = 'to_port'
group = xmlutil.SubTemplateElement(elem, 'group', selector='group')
name = xmlutil.SubTemplateElement(group, 'name')
name.text = 'name'
tenant_id = xmlutil.SubTemplateElement(group, 'tenant_id')
tenant_id.text = 'tenant_id'
ip_range = xmlutil.SubTemplateElement(elem, 'ip_range',
selector='ip_range')
cidr = xmlutil.SubTemplateElement(ip_range, 'cidr')
cidr.text = 'cidr'
def make_sg(elem):
elem.set('id')
elem.set('tenant_id')
elem.set('name')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
rules = xmlutil.SubTemplateElement(elem, 'rules')
rule = xmlutil.SubTemplateElement(rules, 'rule', selector='rules')
make_rule(rule)
def _authorize_context(req):
context = req.environ['nova.context']
authorize(context)
return context
sg_nsmap = {None: wsgi.XMLNS_V11}
class SecurityGroupRuleTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group_rule',
selector='security_group_rule')
make_rule(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_group',
selector='security_group')
make_sg(root)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('security_groups')
elem = xmlutil.SubTemplateElement(root, 'security_group',
selector='security_groups')
make_sg(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=sg_nsmap)
class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
Deserializer to handle xml-formatted security group requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group = {}
sg_node = self.find_first_child_named(dom,
'security_group')
if sg_node is not None:
if sg_node.hasAttribute('name'):
security_group['name'] = sg_node.getAttribute('name')
desc_node = self.find_first_child_named(sg_node,
"description")
if desc_node:
security_group['description'] = self.extract_text(desc_node)
return {'body': {'security_group': security_group}}
class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""
    Deserializer to handle xml-formatted security group rule requests.
"""
def default(self, string):
"""Deserialize an xml-formatted security group create request."""
dom = xmlutil.safe_minidom_parse_string(string)
security_group_rule = self._extract_security_group_rule(dom)
return {'body': {'security_group_rule': security_group_rule}}
def _extract_security_group_rule(self, node):
"""Marshal the security group rule attribute of a parsed request."""
sg_rule = {}
sg_rule_node = self.find_first_child_named(node,
'security_group_rule')
if sg_rule_node is not None:
ip_protocol_node = self.find_first_child_named(sg_rule_node,
"ip_protocol")
if ip_protocol_node is not None:
sg_rule['ip_protocol'] = self.extract_text(ip_protocol_node)
from_port_node = self.find_first_child_named(sg_rule_node,
"from_port")
if from_port_node is not None:
sg_rule['from_port'] = self.extract_text(from_port_node)
to_port_node = self.find_first_child_named(sg_rule_node, "to_port")
if to_port_node is not None:
sg_rule['to_port'] = self.extract_text(to_port_node)
parent_group_id_node = self.find_first_child_named(sg_rule_node,
"parent_group_id")
if parent_group_id_node is not None:
sg_rule['parent_group_id'] = self.extract_text(
parent_group_id_node)
group_id_node = self.find_first_child_named(sg_rule_node,
"group_id")
if group_id_node is not None:
sg_rule['group_id'] = self.extract_text(group_id_node)
cidr_node = self.find_first_child_named(sg_rule_node, "cidr")
if cidr_node is not None:
sg_rule['cidr'] = self.extract_text(cidr_node)
return sg_rule
@contextlib.contextmanager
def translate_exceptions():
"""Translate nova exceptions to http exceptions."""
try:
yield
except exception.Invalid as exp:
msg = exp.format_message()
raise exc.HTTPBadRequest(explanation=msg)
except exception.SecurityGroupNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceNotFound as exp:
msg = exp.format_message()
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
raise exc.HTTPRequestEntityTooLarge(explanation=msg)
except exception.NoUniqueMatch as exp:
msg = exp.format_message()
raise exc.HTTPConflict(explanation=msg)
class SecurityGroupControllerBase(object):
"""Base class for Security Group controllers."""
def __init__(self):
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _format_security_group_rule(self, context, rule):
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
sg_rule['ip_protocol'] = rule['protocol']
sg_rule['from_port'] = rule['from_port']
sg_rule['to_port'] = rule['to_port']
sg_rule['group'] = {}
sg_rule['ip_range'] = {}
if rule['group_id']:
with translate_exceptions():
source_group = self.security_group_api.get(context,
id=rule['group_id'])
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
def _format_security_group(self, context, group):
security_group = {}
security_group['id'] = group['id']
security_group['description'] = group['description']
security_group['name'] = group['name']
security_group['tenant_id'] = group['project_id']
security_group['rules'] = []
for rule in group['rules']:
security_group['rules'] += [self._format_security_group_rule(
context, rule)]
return security_group
def _from_body(self, body, key):
if not body:
raise exc.HTTPUnprocessableEntity()
value = body.get(key, None)
if value is None:
raise exc.HTTPUnprocessableEntity()
return value
class SecurityGroupController(SecurityGroupControllerBase):
"""The Security group API controller for the OpenStack API."""
@wsgi.serializers(xml=SecurityGroupTemplate)
def show(self, req, id):
"""Return data about the given security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
return {'security_group': self._format_security_group(context,
security_group)}
def delete(self, req, id):
"""Delete a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
self.security_group_api.destroy(context, security_group)
return webob.Response(status_int=202)
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req):
"""Returns a list of security groups."""
context = _authorize_context(req)
search_opts = {}
search_opts.update(req.GET)
with translate_exceptions():
project_id = context.project_id
raw_groups = self.security_group_api.list(context,
project=project_id,
search_opts=search_opts)
limited_list = common.limited(raw_groups, req)
result = [self._format_security_group(context, group)
for group in limited_list]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
@wsgi.serializers(xml=SecurityGroupTemplate)
@wsgi.deserializers(xml=SecurityGroupXMLDeserializer)
def create(self, req, body):
"""Creates a new security group."""
context = _authorize_context(req)
security_group = self._from_body(body, 'security_group')
group_name = security_group.get('name', None)
group_description = security_group.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.create_security_group(
context, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
@wsgi.serializers(xml=SecurityGroupTemplate)
def update(self, req, id, body):
"""Update a security group."""
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
security_group = self.security_group_api.get(context, None, id,
map_exception=True)
security_group_data = self._from_body(body, 'security_group')
group_name = security_group_data.get('name', None)
group_description = security_group_data.get('description', None)
with translate_exceptions():
self.security_group_api.validate_property(group_name, 'name', None)
self.security_group_api.validate_property(group_description,
'description', None)
group_ref = self.security_group_api.update_security_group(
context, security_group, group_name, group_description)
return {'security_group': self._format_security_group(context,
group_ref)}
class SecurityGroupRulesController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupRuleTemplate)
@wsgi.deserializers(xml=SecurityGroupRulesXMLDeserializer)
def create(self, req, body):
context = _authorize_context(req)
sg_rule = self._from_body(body, 'security_group_rule')
with translate_exceptions():
parent_group_id = self.security_group_api.validate_id(
sg_rule.get('parent_group_id', None))
security_group = self.security_group_api.get(context, None,
parent_group_id,
map_exception=True)
try:
new_rule = self._rule_args_to_dict(context,
to_port=sg_rule.get('to_port'),
from_port=sg_rule.get('from_port'),
ip_protocol=sg_rule.get('ip_protocol'),
cidr=sg_rule.get('cidr'),
group_id=sg_rule.get('group_id'))
except Exception as exp:
raise exc.HTTPBadRequest(explanation=unicode(exp))
if new_rule is None:
msg = _("Not enough parameters to build a valid rule.")
raise exc.HTTPBadRequest(explanation=msg)
new_rule['parent_group_id'] = security_group['id']
if 'cidr' in new_rule:
net, prefixlen = netutils.get_net_and_prefixlen(new_rule['cidr'])
if net not in ('0.0.0.0', '::') and prefixlen == '0':
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
with translate_exceptions():
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
return {"security_group_rule": self._format_security_group_rule(
context,
security_group_rule)}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
if group_id is not None:
group_id = self.security_group_api.validate_id(group_id)
# check if groupId exists
self.security_group_api.get(context, id=group_id)
return self.security_group_api.new_group_ingress_rule(
group_id, ip_protocol, from_port, to_port)
else:
cidr = self.security_group_api.parse_cidr(cidr)
return self.security_group_api.new_cidr_ingress_rule(
cidr, ip_protocol, from_port, to_port)
def delete(self, req, id):
context = _authorize_context(req)
with translate_exceptions():
id = self.security_group_api.validate_id(id)
rule = self.security_group_api.get_rule(context, id)
group_id = rule['parent_group_id']
security_group = self.security_group_api.get(context, None,
group_id,
map_exception=True)
self.security_group_api.remove_rules(context, security_group,
[rule['id']])
return webob.Response(status_int=202)
class ServerSecurityGroupController(SecurityGroupControllerBase):
@wsgi.serializers(xml=SecurityGroupsTemplate)
def index(self, req, server_id):
"""Returns a list of security groups for the given instance."""
context = _authorize_context(req)
self.security_group_api.ensure_default(context)
with translate_exceptions():
instance = self.compute_api.get(context, server_id)
groups = self.security_group_api.get_instance_security_groups(
context, instance['uuid'], True)
result = [self._format_security_group(context, group)
for group in groups]
return {'security_groups':
list(sorted(result,
key=lambda k: (k['tenant_id'], k['name'])))}
class SecurityGroupActionController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupActionController, self).__init__(*args, **kwargs)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
self.compute_api = compute.API(
security_group_api=self.security_group_api)
def _parse(self, body, action):
try:
body = body[action]
group_name = body['name']
except TypeError:
msg = _("Missing parameter dict")
raise webob.exc.HTTPBadRequest(explanation=msg)
except KeyError:
msg = _("Security group not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
if not group_name or group_name.strip() == '':
msg = _("Security group name cannot be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return group_name
def _invoke(self, method, context, id, group_name):
with translate_exceptions():
instance = self.compute_api.get(context, id)
method(context, instance, group_name)
return webob.Response(status_int=202)
@wsgi.action('addSecurityGroup')
def _addSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'addSecurityGroup')
return self._invoke(self.security_group_api.add_to_instance,
context, id, group_name)
@wsgi.action('removeSecurityGroup')
def _removeSecurityGroup(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
group_name = self._parse(body, 'removeSecurityGroup')
return self._invoke(self.security_group_api.remove_from_instance,
context, id, group_name)
class SecurityGroupsOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(SecurityGroupsOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def _extend_servers(self, req, servers):
# TODO(arosen) this function should be refactored to reduce duplicate
# code and use get_instance_security_groups instead of get_db_instance.
if not len(servers):
return
key = "security_groups"
context = _authorize_context(req)
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
groups = instance.get(key)
if groups:
server[key] = [{"name": group["name"]} for group in groups]
else:
# If method is a POST we get the security groups intended for an
# instance from the request. The reason for this is if using
# neutron security groups the requested security groups for the
# instance are not in the db and have not been sent to neutron yet.
if req.method != 'POST':
sg_instance_bindings = (
self.security_group_api
.get_instances_security_groups_bindings(context,
servers))
for server in servers:
groups = sg_instance_bindings.get(server['id'])
if groups:
server[key] = groups
# In this section of code len(servers) == 1 as you can only POST
# one server in an API request.
else:
try:
# try converting to json
req_obj = json.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
key, [{'name': 'default'}])
except ValueError:
root = xmlutils.safe_minidom_parse_string(req.body)
sg_root = root.getElementsByTagName(key)
groups = []
if sg_root:
security_groups = sg_root[0].getElementsByTagName(
'security_group')
for security_group in security_groups:
groups.append(
{'name': security_group.getAttribute('name')})
if not groups:
groups = [{'name': 'default'}]
servers[0][key] = groups
def _show(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
if 'server' in resp_obj.obj:
resp_obj.attach(xml=SecurityGroupServerTemplate())
self._extend_servers(req, [resp_obj.obj['server']])
@wsgi.extends
def show(self, req, resp_obj, id):
return self._show(req, resp_obj)
@wsgi.extends
def create(self, req, resp_obj, body):
return self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
if not softauth(req.environ['nova.context']):
return
resp_obj.attach(xml=SecurityGroupServersTemplate())
self._extend_servers(req, list(resp_obj.obj['servers']))
class SecurityGroupsTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return "security_groups" in datum
def make_server(elem):
secgrps = SecurityGroupsTemplateElement('security_groups')
elem.append(secgrps)
secgrp = xmlutil.SubTemplateElement(secgrps, 'security_group',
selector="security_groups")
secgrp.set('name')
class SecurityGroupServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1)
class SecurityGroupServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1)
class Security_groups(extensions.ExtensionDescriptor):
"""Security group support."""
name = "SecurityGroups"
alias = "os-security-groups"
namespace = "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1"
updated = "2013-05-28T00:00:00+00:00"
def get_controller_extensions(self):
controller = SecurityGroupActionController()
actions = extensions.ControllerExtension(self, 'servers', controller)
controller = SecurityGroupsOutputController()
output = extensions.ControllerExtension(self, 'servers', controller)
return [actions, output]
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-security-groups',
controller=SecurityGroupController())
resources.append(res)
res = extensions.ResourceExtension('os-security-group-rules',
controller=SecurityGroupRulesController())
resources.append(res)
res = extensions.ResourceExtension(
'os-security-groups',
controller=ServerSecurityGroupController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
class NativeSecurityGroupExceptions(object):
@staticmethod
def raise_invalid_property(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_group_already_exists(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_group(msg):
raise exception.Invalid(msg)
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise exception.InvalidCidr(cidr=cidr)
@staticmethod
def raise_over_quota(msg):
raise exception.SecurityGroupLimitExceeded(msg)
@staticmethod
def raise_not_found(msg):
raise exception.SecurityGroupNotFound(msg)
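# The two classes below mix NativeSecurityGroupExceptions into the nova and
# neutron security group APIs so that driver failures surface as the nova
# exceptions raised above.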
class NativeNovaSecurityGroupAPI(NativeSecurityGroupExceptions,
compute_api.SecurityGroupAPI):
pass
class NativeNeutronSecurityGroupAPI(NativeSecurityGroupExceptions,
neutron_driver.SecurityGroupAPI):
pass
| apache-2.0 |
endlessm/chromium-browser | third_party/catapult/telemetry/telemetry/timeline/thread.py | 1 | 9786 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import telemetry.timeline.async_slice as async_slice_module
import telemetry.timeline.event_container as event_container
import telemetry.timeline.flow_event as flow_event_module
import telemetry.timeline.sample as sample_module
import telemetry.timeline.slice as slice_module
class Thread(event_container.TimelineEventContainer):
"""A Thread stores all the trace events collected for a particular
thread. We organize the synchronous slices on a thread by "subrows," where
subrow 0 has all the root slices, subrow 1 those nested 1 deep, and so on.
The asynchronous slices are stored in an AsyncSliceGroup object.
"""
def __init__(self, process, tid):
super(Thread, self).__init__('thread %s' % tid, parent=process)
self.tid = tid
self._async_slices = []
self._flow_events = []
self._samples = []
self._toplevel_slices = []
self._all_slices = []
# State only valid during import.
self._open_slices = []
self._newly_added_slices = []
@property
def toplevel_slices(self):
return self._toplevel_slices
@property
def all_slices(self):
return self._all_slices
@property
def samples(self):
return self._samples
@property
def async_slices(self):
return self._async_slices
@property
def open_slice_count(self):
return len(self._open_slices)
def IterChildContainers(self):
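    # A bare return before an (unreachable) yield makes this an empty generator.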
return
yield # pylint: disable=unreachable
def IterEventsInThisContainer(self, event_type_predicate, event_predicate):
if event_type_predicate(slice_module.Slice):
for s in self._newly_added_slices:
if event_predicate(s):
yield s
for s in self._all_slices:
if event_predicate(s):
yield s
if event_type_predicate(async_slice_module.AsyncSlice):
for async_slice in self._async_slices:
if event_predicate(async_slice):
yield async_slice
for sub_slice in async_slice.IterEventsInThisContainerRecrusively():
if event_predicate(sub_slice):
yield sub_slice
if event_type_predicate(flow_event_module.FlowEvent):
for flow_event in self._flow_events:
if event_predicate(flow_event):
yield flow_event
if event_type_predicate(sample_module.Sample):
for sample in self._samples:
if event_predicate(sample):
yield sample
def AddSample(self, category, name, timestamp, args=None):
if len(self._samples) and timestamp < self._samples[-1].start:
raise ValueError(
'Samples must be added in increasing timestamp order')
sample = sample_module.Sample(
self, category, name, timestamp, args=args)
self._samples.append(sample)
def AddAsyncSlice(self, async_slice):
self._async_slices.append(async_slice)
def AddFlowEvent(self, flow_event):
self._flow_events.append(flow_event)
def BeginSlice(self, category, name, timestamp, thread_timestamp=None,
args=None):
"""Opens a new slice for the thread.
    Calls to BeginSlice and EndSlice must be made with monotonically
    non-decreasing timestamps.
    * category: Category to which the slice belongs.
    * name: Name of the slice to add.
    * timestamp: The timestamp of the slice, in milliseconds.
    * thread_timestamp: Thread specific clock (scheduled) timestamp of the
      slice, in milliseconds.
    * args: Arguments associated with the slice.
    Returns the newly opened slice.
"""
if len(self._open_slices) > 0 and timestamp < self._open_slices[-1].start:
raise ValueError(
'Slices must be added in increasing timestamp order')
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self._open_slices.append(new_slice)
new_slice.did_not_finish = True
self.PushSlice(new_slice)
return new_slice
def EndSlice(self, end_timestamp, end_thread_timestamp=None):
""" Ends the last begun slice in this group and pushes it onto the slice
array.
* end_timestamp: Timestamp when the slice ended in milliseconds
* end_thread_timestamp: Timestamp when the scheduled time of the slice ended
in milliseconds
returns completed slice.
"""
if not len(self._open_slices):
raise ValueError(
'EndSlice called without an open slice')
curr_slice = self._open_slices.pop()
if end_timestamp < curr_slice.start:
raise ValueError(
'Slice %s end time is before its start.' % curr_slice.name)
curr_slice.duration = end_timestamp - curr_slice.start
# On Windows, it is possible to have a value for |end_thread_timestamp|
# but not for |curr_slice.thread_start|, because it takes some time to
# initialize the thread time timer.
    if curr_slice.thread_start is not None and end_thread_timestamp is not None:
curr_slice.thread_duration = (end_thread_timestamp -
curr_slice.thread_start)
curr_slice.did_not_finish = False
return curr_slice
def PushCompleteSlice(self, category, name, timestamp, duration,
thread_timestamp, thread_duration, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
if duration is None:
new_slice.did_not_finish = True
else:
new_slice.duration = duration
new_slice.thread_duration = thread_duration
self.PushSlice(new_slice)
return new_slice
def PushMarkSlice(
self, category, name, timestamp, thread_timestamp, args=None):
new_slice = slice_module.Slice(self, category, name, timestamp,
thread_timestamp=thread_timestamp,
args=args)
self.PushSlice(new_slice)
return new_slice
def PushSlice(self, new_slice):
self._newly_added_slices.append(new_slice)
return new_slice
def AutoCloseOpenSlices(self, max_timestamp, max_thread_timestamp):
for s in self._newly_added_slices:
if s.did_not_finish:
s.duration = max_timestamp - s.start
assert s.duration >= 0
        if s.thread_start is not None:
s.thread_duration = max_thread_timestamp - s.thread_start
assert s.thread_duration >= 0
self._open_slices = []
def IsTimestampValidForBeginOrEnd(self, timestamp):
if not len(self._open_slices):
return True
return timestamp >= self._open_slices[-1].start
def FinalizeImport(self):
self._BuildSliceSubRows()
def _BuildSliceSubRows(self):
"""This function works by walking through slices by start time.
The basic idea here is to insert each slice as deep into the subrow
list as it can go such that every subslice is fully contained by its
parent slice.
Visually, if we start with this:
0: [ a ]
1: [ b ]
2: [c][d]
To place this slice:
[e]
    We first check row 2's last item, [d]. [e] won't fit into [d] (they don't
    even intersect). So we go to row 1. That gives us [b], and [e] won't fit
    into that either. So, we go to row 0 and its last slice, [a]. That can
completely contain [e], so that means we should add [e] as a subslice
of [a]. That puts it on row 1, yielding:
0: [ a ]
1: [ b ][e]
2: [c][d]
If we then get this slice:
[f]
We do the same deepest-to-shallowest walk of the subrows trying to fit
it. This time, it doesn't fit in any open slice. So, we simply append
it to row 0 (a root slice):
0: [ a ] [f]
1: [ b ][e]
"""
def CompareSlices(s1, s2):
if s1.start == s2.start:
# Break ties by having the slice with the greatest
# end timestamp come first.
return cmp(s2.end, s1.end)
return cmp(s1.start, s2.start)
assert len(self._toplevel_slices) == 0
assert len(self._all_slices) == 0
if not len(self._newly_added_slices):
return
self._all_slices.extend(self._newly_added_slices)
sorted_slices = sorted(self._newly_added_slices, cmp=CompareSlices)
root_slice = sorted_slices[0]
self._toplevel_slices.append(root_slice)
for s in sorted_slices[1:]:
if not self._AddSliceIfBounds(root_slice, s):
root_slice = s
self._toplevel_slices.append(root_slice)
self._newly_added_slices = []
def _AddSliceIfBounds(self, root, child):
"""Adds a child slice to a root slice its proper row.
Return False if the child slice is not in the bounds
of the root slice.
Because we know that the start time of child is >= the start time
of all other slices seen so far, we can just check the last slice
of each row for bounding.
"""
# The source trace data is in microseconds but we store it as milliseconds
# in floating-point. Since we can't represent micros as millis perfectly,
# two end=start+duration combos that should be the same will be slightly
# different. Round back to micros to ensure equality below.
child_end_micros = round(child.end * 1000)
root_end_micros = round(root.end * 1000)
if child.start >= root.start and child_end_micros <= root_end_micros:
if len(root.sub_slices) > 0:
if self._AddSliceIfBounds(root.sub_slices[-1], child):
return True
child.parent_slice = root
root.AddSubSlice(child)
return True
return False
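# A minimal usage sketch (hypothetical, not part of this module): slices must
# be opened and closed with non-decreasing timestamps, after which
# FinalizeImport() builds the subrow structure.
#   thread = Thread(process, tid=1)
#   thread.BeginSlice('cat', 'outer', 10.0)
#   thread.BeginSlice('cat', 'inner', 12.0)
#   thread.EndSlice(14.0)
#   thread.EndSlice(20.0)
#   thread.FinalizeImport()
#   assert len(thread.toplevel_slices) == 1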
| bsd-3-clause |
yuanzhao/gpdb | gpMgmt/bin/gppylib/test/unit/__init__.py | 20 | 1321 | # Make sure Python loads the modules of this package via absolute paths.
from os.path import abspath as _abspath
__path__[0] = _abspath(__path__[0])
from gppylib.gparray import GpArray, GpDB
def setup_fake_gparray():
master = GpDB.initFromString("1|-1|p|p|s|u|mdw|mdw|5432|None|/data/master||/data/master/base/10899,/data/master/base/1,/data/master/base/10898,/data/master/base/25780,/data/master/base/34782")
primary0 = GpDB.initFromString("2|0|p|p|s|u|sdw1|sdw1|40000|41000|/data/primary0||/data/primary0/base/10899,/data/primary0/base/1,/data/primary0/base/10898,/data/primary0/base/25780,/data/primary0/base/34782")
primary1 = GpDB.initFromString("3|1|p|p|s|u|sdw2|sdw2|40001|41001|/data/primary1||/data/primary1/base/10899,/data/primary1/base/1,/data/primary1/base/10898,/data/primary1/base/25780,/data/primary1/base/34782")
mirror0 = GpDB.initFromString("4|0|m|m|s|u|sdw2|sdw2|50000|51000|/data/mirror0||/data/mirror0/base/10899,/data/mirror0/base/1,/data/mirror0/base/10898,/data/mirror0/base/25780,/data/mirror0/base/34782")
mirror1 = GpDB.initFromString("5|1|m|m|s|u|sdw1|sdw1|50001|51001|/data/mirror1||/data/mirror1/base/10899,/data/mirror1/base/1,/data/mirror1/base/10898,/data/mirror1/base/25780,/data/mirror1/base/34782")
return GpArray([master,primary0,primary1,mirror0,mirror1])
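# Example (hypothetical): unit tests can use the returned five-segment cluster
# as a fixture, e.g. gparray = setup_fake_gparray().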
| apache-2.0 |
Domatix/stock-logistics-workflow | stock_batch_picking/wizard/batch_picking_creator.py | 2 | 6683 | # Copyright 2012-2016 Camptocamp SA
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
import json
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class StockBatchPickingCreator(models.TransientModel):
"""Create a stock.batch.picking from stock.picking
"""
_name = 'stock.batch.picking.creator'
_description = 'Batch Picking Creator'
_group_field_param = 'stock_batch_picking.group_field'
name = fields.Char(
'Name', required=True,
default=lambda x: x.env['ir.sequence'].next_by_code(
'stock.batch.picking'
),
help='Name of the batch picking'
)
date = fields.Date(
'Date', required=True, index=True, default=fields.Date.context_today,
help='Date on which the batch picking is to be processed'
)
picker_id = fields.Many2one(
'res.users', string='Picker',
default=lambda self: self._default_picker_id(),
help='The user to which the pickings are assigned'
)
notes = fields.Text('Notes', help='free form remarks')
batch_by_group = fields.Boolean(
string='Create batch pickings grouped by fields',
)
group_field_ids = fields.One2many(
comodel_name='stock.batch.picking.creator.group.field',
inverse_name='batch_picking_creator_id',
string='Group by field',
        help='If any are set, multiple batch pickings will be created, '
             'one per group field value',
)
@api.onchange('batch_by_group')
def onchange_batch_by_group(self):
if self.batch_by_group:
self.group_field_ids = False
for index, field in enumerate(self.load_store_fields()):
self.group_field_ids += self.group_field_ids.new({
'sequence': index,
'field_id': field.id,
})
def load_store_fields(self):
group_field_ids = self.env['ir.config_parameter'].sudo().get_param(
self._group_field_param)
group_fields = self.env['ir.model.fields'].browse(
group_field_ids and json.loads(group_field_ids))
return group_fields
@api.model
def default_get(self, fields):
"""
        Load the group fields used last time, as stored in config parameters.
"""
res = super().default_get(fields)
group_fields = self.load_store_fields()
res['batch_by_group'] = group_fields and True or False
return res
def _default_picker_id(self):
""" Return default_picker_id from the main company warehouse
except if a warehouse_id is specified in context.
"""
warehouse_id = self.env.context.get('warehouse_id')
if warehouse_id:
warehouse = self.env['stock.warehouse'].browse(warehouse_id)
else:
warehouse = self.env['stock.warehouse'].search([
('company_id', '=', self.env.user.company_id.id)
], limit=1)
return warehouse.default_picker_id
def _prepare_stock_batch_picking(self):
return {
'name': self.name,
'date': self.date,
'notes': self.notes,
'picker_id': self.picker_id.id,
}
def _raise_message_error(self):
return _(
"All selected pickings are already in a batch picking "
"or are in a wrong state."
)
def create_simple_batch(self, domain):
""" Create one batch picking with all pickings """
pickings = self.env['stock.picking'].search(domain)
if not pickings:
raise UserError(self._raise_message_error())
batch = self.env['stock.batch.picking'].create(
self._prepare_stock_batch_picking())
pickings.write({'batch_picking_id': batch.id})
return batch
def create_multiple_batch(self, domain):
""" Create n batch pickings by grouped fields selected """
StockPicking = self.env['stock.picking']
groupby = [f.field_id.name for f in self.group_field_ids]
pickings_grouped = StockPicking.read_group(domain, groupby, groupby)
if not pickings_grouped:
raise UserError(self._raise_message_error())
batchs = self.env['stock.batch.picking'].browse()
for group in pickings_grouped:
batchs += self.env['stock.batch.picking'].create(
self._prepare_stock_batch_picking())
StockPicking.search(group['__domain']).write({
'batch_picking_id': batchs[-1:].id,
})
return batchs
@api.multi
def action_create_batch(self):
""" Create a batch picking with selected pickings after having checked
that they are not already in another batch or done/cancel.
"""
domain = [
('id', 'in', self.env.context['active_ids']),
('batch_picking_id', '=', False),
('state', 'not in', ('cancel', 'done')),
]
if self.batch_by_group and self.group_field_ids:
batchs = self.create_multiple_batch(domain)
else:
batchs = self.create_simple_batch(domain)
        # Store the fields used as a system parameter so they are loaded,
        # in the same order, on the next execution.
if self.batch_by_group:
group_fields = [f.field_id.id for f in self.group_field_ids]
self.env["ir.config_parameter"].sudo().set_param(
self._group_field_param, group_fields)
return self.action_view_batch_picking(batchs)
@api.multi
def action_view_batch_picking(self, batch_pickings):
if len(batch_pickings) > 1:
            action = self.env.ref(
                'stock_batch_picking.action_stock_batch_picking_tree'
            ).read()[0]
action['domain'] = [('id', 'in', batch_pickings.ids)]
else:
action = batch_pickings.get_formview_action()
return action
class StockBatchPickingCreatorGroupField(models.TransientModel):
""" Make mass batch pickings from grouped fields """
_name = 'stock.batch.picking.creator.group.field'
_description = 'Batch Picking Creator Group Field'
_order = 'sequence, id'
batch_picking_creator_id = fields.Many2one(
comodel_name='stock.batch.picking.creator',
ondelete="cascade",
required=True,
)
sequence = fields.Integer(help='Group by picking field')
field_id = fields.Many2one(
comodel_name='ir.model.fields',
string='Field to group',
domain=[
('model', '=', 'stock.picking'),
('store', '=', True),
],
required=True,
)
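# Usage sketch (hypothetical, run from an odoo shell): create the wizard with
# the selected pickings in the context, then launch the batch creation.
#   wizard = env['stock.batch.picking.creator'].with_context(
#       active_ids=pickings.ids).create({})
#   action = wizard.action_create_batch()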
| agpl-3.0 |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/numpy/distutils/command/__init__.py | 264 | 1098 | """distutils.command
Package containing implementation of all the standard Distutils
commands.
"""
from __future__ import division, absolute_import, print_function
# NOTE: this test looks out of place in distutils.command; np.NA exists only
# on NumPy's masked-NA development branch, so the imports below are what it
# would need to run there (an assumption, not part of mainline numpy).
import numpy as np
from numpy.testing import assert_raises
def test_na_writable_attributes_deletion():
    a = np.NA(2)
    attr = ['payload', 'dtype']
    for s in attr:
        assert_raises(AttributeError, delattr, a, s)
__revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
distutils_all = [ #'build_py',
'clean',
'install_clib',
'install_scripts',
'bdist',
'bdist_dumb',
'bdist_wininst',
]
__import__('distutils.command', globals(), locals(), distutils_all)
__all__ = ['build',
'config_compiler',
'config',
'build_src',
'build_py',
'build_ext',
'build_clib',
'build_scripts',
'install',
'install_data',
'install_headers',
'install_lib',
'bdist_rpm',
'sdist',
] + distutils_all
| bsd-2-clause |
kevin8909/xjerp | openerp/addons/l10n_in/__init__.py | 702 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tiagochiavericosta/edx-platform | common/test/acceptance/pages/lms/dashboard_search.py | 138 | 1130 | """
Dashboard search
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class DashboardSearchPage(PageObject):
"""
Dashboard page featuring a search form
"""
search_bar_selector = '#dashboard-search-bar'
url = "{base}/dashboard".format(base=BASE_URL)
@property
def search_results(self):
""" search results list showing """
return self.q(css='#dashboard-search-results')
def is_browser_on_page(self):
""" did we find the search bar in the UI """
return self.q(css=self.search_bar_selector).present
def enter_search_term(self, text):
""" enter the search term into the box """
self.q(css=self.search_bar_selector + ' input[type="text"]').fill(text)
def search(self):
""" execute the search """
self.q(css=self.search_bar_selector + ' [type="submit"]').click()
self.wait_for_element_visibility('.search-info', 'Search results are shown')
def search_for_term(self, text):
"""
Search and return results
"""
self.enter_search_term(text)
self.search()
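# Usage sketch (hypothetical Bok Choy test code):
#   page = DashboardSearchPage(browser)
#   page.visit()
#   page.search_for_term('dummy')
#   assert page.search_results.present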
| agpl-3.0 |
jonnor/FreeCAD | src/Mod/Import/App/SCL/Rules.py | 56 | 1771 | # Copyright (c) 2011-2012, Thomas Paviot (tpaviot@gmail.com)
# All rights reserved.
# This file is part of the StepClassLibrary (SCL).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__doc__ = "This module defines EXPRESS rules"
class Rule(object):
'''
This class describes a RULE
@TODO: to be implemented
'''
pass
| lgpl-2.1 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/account/wizard/account_invoice_refund.py | 214 | 13008 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
class account_invoice_refund(osv.osv_memory):
"""Refunds invoice"""
_name = "account.invoice.refund"
_description = "Invoice Refund"
_columns = {
'date': fields.date('Date', help='This date will be used as the invoice date for credit note and period will be chosen accordingly!'),
'period': fields.many2one('account.period', 'Force period'),
'journal_id': fields.many2one('account.journal', 'Refund Journal', help='You can select here the journal to use for the credit note that will be created. If you leave that field empty, it will use the same journal as the current invoice.'),
'description': fields.char('Reason', required=True),
        'filter_refund': fields.selection([('refund', 'Create a draft refund'), ('cancel', 'Cancel: create refund and reconcile'), ('modify', 'Modify: create refund, reconcile and create a new draft invoice')], "Refund Method", required=True, help='Refund based on this type. You cannot Modify or Cancel if the invoice is already reconciled.'),
}
def _get_journal(self, cr, uid, context=None):
obj_journal = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
if context is None:
context = {}
inv_type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
type = (inv_type == 'out_invoice') and 'sale_refund' or \
(inv_type == 'out_refund') and 'sale' or \
(inv_type == 'in_invoice') and 'purchase_refund' or \
(inv_type == 'in_refund') and 'purchase'
journal = obj_journal.search(cr, uid, [('type', '=', type), ('company_id','=',company_id)], limit=1, context=context)
return journal and journal[0] or False
def _get_reason(self, cr, uid, context=None):
active_id = context and context.get('active_id', False)
if active_id:
inv = self.pool.get('account.invoice').browse(cr, uid, active_id, context=context)
return inv.name
else:
return ''
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d'),
'journal_id': _get_journal,
'filter_refund': 'refund',
'description': _get_reason,
}
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
user_obj = self.pool.get('res.users')
# remove the entry with key 'form_view_ref', otherwise fields_view_get crashes
context = dict(context or {})
context.pop('form_view_ref', None)
res = super(account_invoice_refund,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('type', 'out_invoice')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
journal_type = (type == 'out_invoice') and 'sale_refund' or \
(type == 'out_refund') and 'sale' or \
(type == 'in_invoice') and 'purchase_refund' or \
(type == 'in_refund') and 'purchase'
for field in res['fields']:
if field == 'journal_id':
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', journal_type), ('company_id','child_of',[company_id])], context=context)
res['fields'][field]['selection'] = journal_select
return res
def compute_refund(self, cr, uid, ids, mode='refund', context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the account invoice refund’s ID or list of IDs
"""
inv_obj = self.pool.get('account.invoice')
reconcile_obj = self.pool.get('account.move.reconcile')
account_m_line_obj = self.pool.get('account.move.line')
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
inv_tax_obj = self.pool.get('account.invoice.tax')
inv_line_obj = self.pool.get('account.invoice.line')
res_users_obj = self.pool.get('res.users')
if context is None:
context = {}
for form in self.browse(cr, uid, ids, context=context):
created_inv = []
date = False
period = False
description = False
company = res_users_obj.browse(cr, uid, uid, context=context).company_id
journal_id = form.journal_id.id
for inv in inv_obj.browse(cr, uid, context.get('active_ids'), context=context):
if inv.state in ['draft', 'proforma2', 'cancel']:
raise osv.except_osv(_('Error!'), _('Cannot %s draft/proforma/cancel invoice.') % (mode))
if inv.reconciled and mode in ('cancel', 'modify'):
raise osv.except_osv(_('Error!'), _('Cannot %s invoice which is already reconciled, invoice should be unreconciled first. You can only refund this invoice.') % (mode))
if form.period.id:
period = form.period.id
else:
period = inv.period_id and inv.period_id.id or False
if not journal_id:
journal_id = inv.journal_id.id
if form.date:
date = form.date
if not form.period.id:
cr.execute("select name from ir_model_fields \
where model = 'account.period' \
and name = 'company_id'")
result_query = cr.fetchone()
if result_query:
cr.execute("""select p.id from account_fiscalyear y, account_period p where y.id=p.fiscalyear_id \
and date(%s) between p.date_start AND p.date_stop and y.company_id = %s limit 1""", (date, company.id,))
else:
cr.execute("""SELECT id
from account_period where date(%s)
between date_start AND date_stop \
limit 1 """, (date,))
res = cr.fetchone()
if res:
period = res[0]
else:
date = inv.date_invoice
if form.description:
description = form.description
else:
description = inv.name
if not period:
raise osv.except_osv(_('Insufficient Data!'), \
_('No period found on the invoice.'))
refund_id = inv_obj.refund(cr, uid, [inv.id], date, period, description, journal_id, context=context)
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
inv_obj.write(cr, uid, [refund.id], {'date_due': date,
'check_total': inv.check_total})
inv_obj.button_compute(cr, uid, refund_id)
created_inv.append(refund_id[0])
if mode in ('cancel', 'modify'):
movelines = inv.move_id.line_id
to_reconcile_ids = {}
for line in movelines:
if line.account_id.id == inv.account_id.id:
to_reconcile_ids.setdefault(line.account_id.id, []).append(line.id)
if line.reconcile_id:
line.reconcile_id.unlink()
refund.signal_workflow('invoice_open')
refund = inv_obj.browse(cr, uid, refund_id[0], context=context)
for tmpline in refund.move_id.line_id:
if tmpline.account_id.id == inv.account_id.id:
to_reconcile_ids[tmpline.account_id.id].append(tmpline.id)
for account in to_reconcile_ids:
account_m_line_obj.reconcile(cr, uid, to_reconcile_ids[account],
writeoff_period_id=period,
writeoff_journal_id = inv.journal_id.id,
writeoff_acc_id=inv.account_id.id
)
if mode == 'modify':
invoice = inv_obj.read(cr, uid, [inv.id],
['name', 'type', 'number', 'reference',
'comment', 'date_due', 'partner_id',
'partner_insite', 'partner_contact',
'partner_ref', 'payment_term', 'account_id',
'currency_id', 'invoice_line', 'tax_line',
'journal_id', 'period_id'], context=context)
invoice = invoice[0]
del invoice['id']
invoice_lines = inv_line_obj.browse(cr, uid, invoice['invoice_line'], context=context)
invoice_lines = inv_obj._refund_cleanup_lines(cr, uid, invoice_lines, context=context)
tax_lines = inv_tax_obj.browse(cr, uid, invoice['tax_line'], context=context)
tax_lines = inv_obj._refund_cleanup_lines(cr, uid, tax_lines, context=context)
invoice.update({
'type': inv.type,
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'period_id': period,
'name': description
})
for field in ('partner_id', 'account_id', 'currency_id',
'payment_term', 'journal_id'):
invoice[field] = invoice[field] and invoice[field][0]
inv_id = inv_obj.create(cr, uid, invoice, {})
if inv.payment_term.id:
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv.payment_term.id, date)
if 'value' in data and data['value']:
inv_obj.write(cr, uid, [inv_id], data['value'])
created_inv.append(inv_id)
xml_id = (inv.type == 'out_refund') and 'action_invoice_tree1' or \
(inv.type == 'in_refund') and 'action_invoice_tree2' or \
(inv.type == 'out_invoice') and 'action_invoice_tree3' or \
(inv.type == 'in_invoice') and 'action_invoice_tree4'
result = mod_obj.get_object_reference(cr, uid, 'account', xml_id)
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
invoice_domain = eval(result['domain'])
invoice_domain.append(('id', 'in', created_inv))
result['domain'] = invoice_domain
return result
def invoice_refund(self, cr, uid, ids, context=None):
data_refund = self.read(cr, uid, ids, ['filter_refund'],context=context)[0]['filter_refund']
return self.compute_refund(cr, uid, ids, data_refund, context=context)
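    # Usage sketch (hypothetical, old osv API): the wizard is normally opened
    # from the invoice form with the invoice ids in the context:
    #   wiz_id = wiz_obj.create(cr, uid, {'description': 'Damaged goods'},
    #                           context={'active_ids': [invoice_id]})
    #   wiz_obj.invoice_refund(cr, uid, [wiz_id],
    #                          context={'active_ids': [invoice_id]})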
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
saguas/jasper_erpnext_report | jasper_erpnext_report/utils/jasper_iter_hooks.py | 1 | 1437 | from __future__ import unicode_literals
__author__ = 'luissaguas'
import frappe
from frappe import _
"""
HOOKS:
jasper_after_sendmail(data, url, file_name, file_path); jasper_before_sendmail(data, file_name, output, url, **kargs);
jasper_after_get_report(file_name, file_output, url, filepath); jasper_before_get_report(data);
jasper_after_list_for_doctype(doctype, docnames, report, lista); jasper_before_list_for_doctype(doctype, docnames, report);
jasper_after_list_for_all(lista); jasper_before_list_for_all();
jasper_scriptlet(JavaScriptlet, ids=None, data=None, cols=None, doctype=None, docname=None);
"""
class JasperHooks:
def __init__(self, hook_name, docname=None, fallback=None):
self.hook_name = hook_name
self.current = 0
self.methods = frappe.get_hooks().get(self.hook_name) or (fallback if fallback is not None else [])
if isinstance(self.methods, dict):
if docname in self.methods.keys():
self.methods = self.methods[docname]
else:
self.methods = fallback if fallback is not None else []
self.methods_len = len(self.methods)
def __iter__(self):
return self
def next(self):
if self.current >= self.methods_len:
raise StopIteration
else:
return self.get_next_jasper_hook_method()
def get_next_jasper_hook_method(self):
if self.methods_len > 0:
curr_method = frappe.get_attr(self.methods[self.current])
self.current += 1
return curr_method
return None
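# Usage sketch (hypothetical): iterate every method registered for a hook and
# call it in turn.
#   for method in JasperHooks("jasper_before_get_report"):
#       method(data)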
| mit |
ulissesf/soletta | data/scripts/sol-flow-node-type-stub-gen.py | 3 | 16441 | #!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import re
class StubError(Exception):
pass
def c_clean(string):
return re.sub('[^A-Za-z0-9_]', '_', string)
def is_custom(typename):
return typename[:7] == "custom:"
def custom_get_name(typename):
return c_clean(typename[7:].lower())
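# e.g. custom_get_name("custom:My Packet") returns "my_packet"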
data_type_to_c_map = {
"boolean": "bool ",
"blob": "struct sol_blob *",
"byte": "unsigned char ",
"int": "struct sol_irange ",
"float": "struct sol_drange ",
"rgb": "struct sol_rgb ",
"direction-vector": "struct sol_direction_vector ",
"string": "const char *",
"error": "int code_value; const char *",
}
def data_type_to_c(typename):
if (is_custom(typename)):
cname = custom_get_name(typename)
return "struct " + cname + "_packet_data "
return data_type_to_c_map[typename]
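# e.g. data_type_to_c("int") returns "struct sol_irange ", while custom types
# map to their generated "<name>_packet_data" struct.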
def load_json(infile, prefix):
data = json.load(infile)
# TODO: validate data
prefix_c = ""
if prefix:
prefix_c = c_clean(prefix)
name_c = c_clean(data["name"].lower())
if prefix_c:
name_c = prefix_c + "_" + name_c
data["name_c"] = name_c
data["NAME_C"] = name_c.upper()
data["prefix_c"] = prefix_c
return data
def find_methods(port_methods, data_type, method_type, methods):
method_name = port_methods.get(method_type)
if method_name:
method = methods.get(method_name, {})
if data_type:
method[data_type] = True
else:
method["any"] = True
methods[method_name] = method
def get_single_packet_type(packets):
if "any" in packets or "empty" in packets:
return None;
if len(packets) > 1:
return None;
return list(packets.keys())[0]
def print_data_struct(outfile, struct):
if struct:
outfile.write("""\
struct %s *mdata = data;
""" % struct)
data_type_to_packet_getter_map = {
"boolean": "boolean(packet, &in_value)",
"blob": "blob(packet, &in_value)",
"int": "irange(packet, &in_value)",
"float": "drange(packet, &in_value)",
"string": "string(packet, &in_value)",
"byte": "byte(packet, &in_value)",
"error": "error(packet, &code_value, &in_value)",
"rgb": "rgb(packet, &in_value)",
"direction-vector": "direction_vector(packet, &in_value)",
}
def data_type_to_packet_getter(typename):
if (is_custom(typename)):
return "packet_get_" + custom_get_name(typename) + \
"(packet /* TODO: add args */)"
return "sol_flow_packet_get_" + data_type_to_packet_getter_map[typename]
def get_base_name(base_name):
if base_name[-5:] != '.json':
raise StubError("Description files not using .json extension '%s'." %
base_name)
base_name = base_name[:-5]
return os.path.basename(base_name)
def include_source(outfile, base_name):
outfile.write("""\
#include "%s"
""" % (base_name + '-gen.c'))
def include_header(outfile, base_name):
outfile.write("""\
#include "%s"
""" % (base_name + '-gen.h'))
def add_empty_line(outfile):
outfile.write("""\
""")
def license_header(outfile):
outfile.write("""\
/*
* This file is part of the Soletta Project
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
""")
def include_common_headers(outfile):
outfile.write("""\
#include "sol-flow-internal.h"
#include <sol-util.h>
#include <errno.h>
""")
def declare_packet(outfile, port, packets):
data_type = port.get("data_type");
if (is_custom(data_type)):
packet_name = custom_get_name(data_type)
if packet_name not in packets:
packets.append(packet_name)
outfile.write("""\
struct %(name_data)s {
/* TODO: add packet struct fields */
};
static void
%(name)s_packet_dispose(const struct sol_flow_packet_type *packet_type,
void *mem)
{
struct %(name_data)s *%(name)s = mem;
/* TODO: free fields alloced memory */
}
static int
%(name)s_packet_init(const struct sol_flow_packet_type *packet_type,
void *mem, const void *input)
{
const struct %(name_data)s *in = input;
struct %(name_data)s *%(name)s = mem;
/* TODO: initialize fields with input content */
return 0;
}
#define %(NAME)s_PACKET_TYPE_API_VERSION (1)
static const struct sol_flow_packet_type _%(NAME)s = {
.api_version = %(NAME)s_PACKET_TYPE_API_VERSION,
.name = "%(NAME)s",
.data_size = sizeof(struct %(name_data)s),
.init = %(name)s_packet_init,
.dispose = %(name)s_packet_dispose,
};
static const struct sol_flow_packet_type *%(NAME)s =
&_%(NAME)s;
#undef %(NAME)s_PACKET_TYPE_API_VERSION
static struct sol_flow_packet *
packet_new_%(name)s(/* TODO: args to fill fields */)
{
struct %(name_data)s %(name)s;
/* TODO: check for args validity and fill fields */
return sol_flow_packet_new(%(NAME)s, &%(name)s);
}
static int
packet_get_%(name)s(const struct sol_flow_packet *packet
/* TODO: args to get fields values */)
{
struct %(name_data)s %(name)s;
int ret;
SOL_NULL_CHECK(packet, -EBADR);
if (sol_flow_packet_get_type(packet) != %(NAME)s)
return -EINVAL;
ret = sol_flow_packet_get(packet, &%(name)s);
SOL_INT_CHECK(ret, != 0, ret);
/* TODO: set args with fields values */
return ret;
}
static int
send_%(name)s_packet(struct sol_flow_node *src, uint16_t src_port
/* TODO: args to create a new packet */)
{
struct sol_flow_packet *packet;
int ret;
packet = packet_new_%(name)s(/* TODO: args */);
SOL_NULL_CHECK(packet, -ENOMEM);
ret = sol_flow_send_packet(src, src_port, packet);
if (ret != 0)
sol_flow_packet_del(packet);
return ret;
}
""" % {
"name": packet_name,
"NAME": packet_name.upper(),
"name_data": packet_name + "_packet_data"
})
def declare_packets(outfile, data, packets):
if "in_ports" in data:
for port in data["in_ports"]:
declare_packet(outfile, port, packets)
if "out_ports" in data:
for port in data["out_ports"]:
declare_packet(outfile, port, packets)
def declare_structs(outfile, data, structs):
# declare empty struct
struct = data.get("private_data_type", None)
if struct and struct not in structs:
outfile.write("""\
struct %s {
/* TODO: add struct fields */
};
""" % struct)
structs.append(struct)
def declare_methods(outfile, data, methods):
struct = data.get("private_data_type", None)
method_open = None
method_close = None
if "methods" in data:
method_open = data["methods"].get("open")
method_close = data["methods"].get("close")
# declare open method
if method_open and method_open not in methods:
outfile.write("""\
static int
%s(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
""" % method_open)
print_data_struct(outfile, struct)
if "options" in data:
outfile.write("""\
const struct %(name_c)s_options *opts;
SOL_FLOW_NODE_OPTIONS_SUB_API_CHECK(options, %(NAME_C)s_OPTIONS_API_VERSION,
-EINVAL);
opts = (const struct %(name_c)s_options *)options;
""" % {
"name_c": data["name_c"],
"NAME_C": data["NAME_C"],
})
outfile.write("""\
/* TODO: implement open method */
return 0;
}
""")
methods.append(method_open)
# declare close method
if method_close and method_close not in methods:
outfile.write("""\
static void
%s(struct sol_flow_node *node, void *data)
{
""" % method_close)
print_data_struct(outfile, struct)
outfile.write("""\
/* TODO: implement close method */
}
""")
methods.append(method_close)
methods_connect = {}
methods_disconnect = {}
methods_process = {}
if "in_ports" in data:
for o in data["in_ports"]:
port_methods = o.get("methods", {})
data_type = o.get("data_type");
find_methods(port_methods, data_type, "process", methods_process)
find_methods(port_methods, data_type,"connect", methods_connect)
find_methods(port_methods, data_type, "disconnect",
methods_disconnect)
if "out_ports" in data:
for o in data["out_ports"]:
port_methods = o.get("methods", {})
data_type = o.get("data_type");
find_methods(port_methods, data_type,"connect", methods_connect)
find_methods(port_methods, data_type, "disconnect",
methods_disconnect)
for method_connect in methods_connect:
if method_connect in methods:
continue
methods.append(method_connect)
outfile.write("""\
static int
%s(struct sol_flow_node *node, void *data, uint16_t port, uint16_t conn_id)
{
""" % method_connect)
print_data_struct(outfile, struct)
outfile.write("""\
/* TODO: implement connect method */
return 0;
}
""")
for method_disconnect in methods_disconnect:
if method_disconnect in methods:
continue
methods.append(method_disconnect)
outfile.write("""\
static int
%s(struct sol_flow_node *node, void *data, uint16_t port, uint16_t conn_id)
{
""" % method_disconnect)
print_data_struct(outfile, struct)
outfile.write("""\
/* TODO: implement disconnect method */
return 0;
}
""")
for method_process in methods_process:
if method_process in methods:
continue
methods.append(method_process)
outfile.write("""\
static int
%s(struct sol_flow_node *node, void *data, uint16_t port, uint16_t conn_id, const struct sol_flow_packet *packet)
{
""" % method_process)
print_data_struct(outfile, struct)
single_type = get_single_packet_type(methods_process[method_process])
if single_type:
outfile.write("""\
int r;
%sin_value;
r = %s;
SOL_INT_CHECK(r, < 0, r);
""" % (data_type_to_c(single_type),
data_type_to_packet_getter(single_type)))
outfile.write("""\
/* TODO: implement process method */
return 0;
}
""")
def generate_stub(stub_file, inputs_list, prefix, is_module):
data = []
base_names = []
methods = []
    # pre-seed with common Soletta types so they are not declared again
structs = ['sol_drange', 'sol_irange', 'sol_rgb', 'sol_direction_vector' ]
packets = []
try:
for input_json in inputs_list:
data.append(load_json(input_json, prefix))
base_names.append(get_base_name(input_json.name))
except:
raise
license_header(stub_file)
for base_name in base_names:
include_header(stub_file, base_name)
include_common_headers(stub_file)
add_empty_line(stub_file)
# declare all custom packets - structs and functions
for data_item in data:
if not "types" in data_item:
declare_packets(stub_file, data_item, packets)
else:
for t in data_item["types"]:
declare_packets(stub_file, t, packets)
# declare private data structs
for data_item in data:
if not "types" in data_item:
declare_structs(stub_file, data_item, structs)
else:
for t in data_item["types"]:
node_name = c_clean(t["name"].split("/")[1])
t["name_c"] = data_item["name_c"]+"_" + node_name
t["NAME_C"] = t["name_c"].upper()
t["prefix_c"] = data_item["prefix_c"]
declare_structs(stub_file, t, structs)
# declare all node and port methods
for data_item in data:
if not "types" in data_item:
declare_methods(stub_file, data_item, methods)
else:
for t in data_item["types"]:
declare_methods(stub_file, t, methods)
for base_name in base_names:
include_source(stub_file, base_name)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--prefix",
help="Prefix to use in C generated code",
type=str)
parser.add_argument("--module",
help="It'll generate a module instead of builtin node types.",
type=bool)
parser.add_argument("--force",
help="Force stub file rewrite.",
default=False,
type=bool)
parser.add_argument("output_stub",
help="Output stub code (.c) file",
type=str)
parser.add_argument("inputs_list", nargs='+',
help="List of input description files in JSON format",
type=argparse.FileType('r'))
args = parser.parse_args()
if os.path.exists(args.output_stub) and not args.force:
raise StubError("Can't overwrite stub file '%s'. Remove it yourself." %
args.output_stub)
try:
stub_file = open(args.output_stub, 'w')
except:
raise
try:
generate_stub(stub_file, args.inputs_list, args.prefix, args.module)
except:
raise
finally:
stub_file.close()
| bsd-3-clause |
MySmile/sfchat | apps/csp/tests/test_view.py | 1 | 2904 | import unittest
import json
from django.test.client import Client
class CSPReportTest(unittest.TestCase):
def setUp(self):
self.client = Client()
self.csp_report_valid = {"csp-report":
{"blocked-uri":"self",
"document-uri":"https://sfchat.mysmile.com.ua/",
"original-policy":"default-src https://sfchat.mysmile.com.ua data: https://www.google-analytics.com; script-src https://sfchat.mysmile.com.ua https://www.google-analytics.com; object-src 'none'; style-src https://sfchat.mysmile.com.ua 'unsafe-inline'; img-src https://sfchat.mysmile.com.ua data: https://www.google-analytics.com; media-src 'none'; frame-src 'none'; font-src 'none'; connect-src https://sfchat.mysmile.com.ua https://www.google-analytics.com; report-uri https://sfchat.mysmile.com.ua/csp-report/",
"referrer":"https://sfchat.mysmile.com.ua/chat/5587085c55e430296d487d11",
"violated-directive":"script-src https://sfchat.mysmile.com.ua https://www.google-analytics.com"
}
}
self.csp_report_invalid = {"csp":{}}
def test_view(self):
response = self.client.post('/csp-report/',
json.dumps(self.csp_report_valid),
content_type='application/csp-report',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertEqual(response.status_code, 204)
def test_view_header_failed(self):
lambda_validate_header = lambda: self.client.post('/csp-report/',
json.dumps(self.csp_report_valid),
content_type='application/json',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertRaises(AttributeError, lambda_validate_header)
def test_view_json_failed(self):
lambda_validate_json = lambda: self.client.post('/csp-report/',
json.dumps(self.csp_report_invalid),
content_type='application/csp-report',
HTTP_USER_AGENT='Mozilla/5.0',
follow=True,
secure=True)
self.assertRaises(AttributeError, lambda_validate_json)
| bsd-3-clause |
tbtraltaa/medianshape | medianshape/simplicial/surfgen.py | 1 | 10038 | # encoding: utf-8
'''
2D surface embedded in 3D
-------------------------
'''
from __future__ import absolute_import
import importlib
import os
import numpy as np
from medianshape.simplicial import pointgen3d, mesh, utils
from medianshape.simplicial.meshgen import meshgen2d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
'''
:math:`\sin\pi x \cos \pi y`.
'''
return np.sin(np.pi*x)*np.cos(np.pi*y)
def sample_surf(scale, step=0.2):
'''
Returns a tuple X, Y, Z of a surface for an experiment.
'''
x = y = np.arange(-4.0, 4.0, step)
X, Y = np.meshgrid(x, y)
from matplotlib.mlab import bivariate_normal
'''
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z3 = bivariate_normal(X, Y, 1, 1, -2, -2)
Z = Z2 - Z1
'''
# Ups
ZU1 = bivariate_normal(X,Y, 1.5, 1, 0,-2)
ZU2 = bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
ZU3 = bivariate_normal(X, Y, 1, 1, -4, 1)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 4, 0.5, 0, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1.5, 1, 0, 1)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
ZD3 = bivariate_normal(X, Y, 1, 1, 4, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, 4)
Z1 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax1 = np.abs(np.amax(Z1))
Z1 = Z1/Zmax1 * scale[2]
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
# Ups
ZU1 = bivariate_normal(X,Y, 2, 1, 1,1)
ZU2 = bivariate_normal(X, Y, 3, 1, -2, 4)
ZU3 = bivariate_normal(X, Y, 1.5, 1.5, -2, -2)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 2, 2, 3, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1, 2, 4, 2)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -2, 2)
ZD3 = bivariate_normal(X, Y, 1.5, 1.5, 1, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, -4)
Z2 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax2 = np.abs(np.amax(Z2))
Z2 = Z2/Zmax2 * scale[2]
X = X * scale[0]/4.0
Y = Y * scale[1]/4.0
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
return X, Y, Z1, Z2
def interpolate_surf(points, values, ipoints, method = "nearest"):
from scipy.interpolate import griddata
'''
Used to interpolate a sample surface to a surface in a mesh.
'''
return griddata(points, values, ipoints, method= method)
def surfgen_shared_boundary(bbox=[-10,-10,-10, 10,10,10], l=3):
'''
Generates two surfaces in 3D with shared boundary for an experiment.
    Writes the two surfaces as a .poly file for tetgen.
'''
# Generating point grids for two surfaces
xmin = bbox[0]
xmax = bbox[3]
ymin = bbox[1]
ymax = bbox[4]
zmin = bbox[2]
zmax = bbox[5]
Xmin, Ymin, Zmin, Xmax, Ymax, Zmax = np.array(bbox)*0.8
X, Y, Z1, Z2 = sample_surf([Xmax, Ymax, zmax*0.3], step=0.8)
Z1 = Z1 + zmax*0.4
Z2 = Z2 - zmax*0.4
    # Symmetric surfaces (alternative, kept commented out):
#Z2 = -Z1 - zmax*0.4
'''
# Plotting the two surfaces
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
plt.show()
'''
mesh = meshgen2d([Xmin, Ymin, Xmax, Ymax], l, include_corners=True)
sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
# Interpolating the surface mesh into two different surfaces
# similar to the the sample surfaces generated before
Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
# Integrating two surfaces
points1 = np.hstack((mesh.points, Z1))
print points1.shape
points2 = np.hstack((mesh.points, Z2))
print points2.shape
corners = utils.boundary_points(bbox)
midcorners = utils.mid_corners(bbox)
offset1 = len(corners) +len(midcorners) + 1
offset2 = len(corners) + len(midcorners) + len(points1) + 1
points = np.concatenate((corners, midcorners, points1, points2), axis=0)
print points.shape
triangles1 = mesh.simplices + offset1
triangles2 = mesh.simplices + offset2
    # Adding the indices of the points as the last column of the coordinate list
Xmin_s1 = np.argwhere(points1[:,0]==Xmin)
Xmin_s1_points = np.hstack((points1[Xmin_s1.reshape(-1,)], Xmin_s1))
# Sorting the indices such that the points are in increasing order of its y-component
Xmin_s1 = (Xmin_s1_points[:,3][np.argsort(Xmin_s1_points[:,1])] + offset1).astype(int)
Xmin_s2 = np.argwhere(points2[:,0]==Xmin)
Xmin_s2_points = np.hstack((points2[Xmin_s2.reshape(-1,)], Xmin_s2))
Xmin_s2 = (Xmin_s2_points[:,3][np.argsort(Xmin_s2_points[:,1])] + offset2).astype(int)
Xmax_s1 = np.argwhere(points1[:,0]==Xmax)
Xmax_s1_points = np.hstack((points1[Xmax_s1.reshape(-1,)], Xmax_s1))
Xmax_s1 = (Xmax_s1_points[:,3][np.argsort(Xmax_s1_points[:,1])] + offset1).astype(int)
Xmax_s2 = np.argwhere(points2[:,0]==Xmax)
Xmax_s2_points = np.hstack((points2[Xmax_s2.reshape(-1,)], Xmax_s2))
Xmax_s2 = (Xmax_s2_points[:,3][np.argsort(Xmax_s2_points[:,1])] + offset2).astype(int)
Ymin_s1 = np.argwhere(points1[:,1]==Ymin)
Ymin_s1_points = np.hstack((points1[Ymin_s1.reshape(-1,)], Ymin_s1))
Ymin_s1 = (Ymin_s1_points[:,3][np.argsort(Ymin_s1_points[:,0])] + offset1).astype(int)
Ymin_s2 = np.argwhere(points2[:,1]==Ymin)
Ymin_s2_points = np.hstack((points2[Ymin_s2.reshape(-1,)], Ymin_s2))
Ymin_s2 = (Ymin_s2_points[:,3][np.argsort(Ymin_s2_points[:,0])] + offset2).astype(int)
Ymax_s1 = np.argwhere(points1[:,1]==Ymax)
Ymax_s1_points = np.hstack((points1[Ymax_s1.reshape(-1,)], Ymax_s1))
Ymax_s1 = (Ymax_s1_points[:,3][np.argsort(Ymax_s1_points[:,0])] + offset1).astype(int)
Ymax_s2 = np.argwhere(points2[:,1]==Ymax)
Ymax_s2_points = np.hstack((points2[Ymax_s2.reshape(-1,)], Ymax_s2))
Ymax_s2 = (Ymax_s2_points[:,3][np.argsort(Ymax_s2_points[:,0])] + offset2).astype(int)
for i in range(len(Xmin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Xmin_s1[i], Xmin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Xmin_s1[-1], 12]))
for i in range(len(Xmin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Xmin_s2[i], Xmin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Xmin_s2[-1], 12]))
for i in range(len(Xmax_s1)-1):
triangles1 = np.vstack((triangles1, [10, Xmax_s1[i], Xmax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [10, Xmax_s1[-1], 11]))
for i in range(len(Xmax_s2)-1):
triangles2 = np.vstack((triangles2, [10, Xmax_s2[i], Xmax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [10, Xmax_s2[-1], 11]))
for i in range(len(Ymin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Ymin_s1[i], Ymin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Ymin_s1[-1], 10]))
for i in range(len(Ymin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Ymin_s2[i], Ymin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Ymin_s2[-1], 10]))
for i in range(len(Ymax_s1)-1):
triangles1 = np.vstack((triangles1, [12, Ymax_s1[i], Ymax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [12, Ymax_s1[-1], 11]))
for i in range(len(Ymax_s2)-1):
triangles2 = np.vstack((triangles2, [12, Ymax_s2[i], Ymax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [12, Ymax_s2[-1], 11]))
triangles = np.vstack((triangles1, triangles2))
# Preparing PLC and save it to .poly file for tetgen
with open(os.environ['HOME'] + '/mediansurf.poly', 'w') as f:
f.write("#Part 1 - the node list\n")
f.write("#%d nodes in 3d, no attributes, no boundary marker\n"%points.shape[0])
f.write('%d %d %d %d\n' % (points.shape[0], 3, 0, 0))
for i, p in enumerate(points):
f.write("%d %f %f %f\n"%(i+1, p[0], p[1], p[2]))
# Facet count: the top and bottom faces are one facet each, each of the 4
# sides is one facet holding 2 polygons (a boundary quad plus the median
# edge), and every triangle of the two surfaces is its own facet.
fn = 6 + len(triangles)
f.write("#Part 2 - the facet list.\n")
f.write("#%d facets with boundary markers\n"%fn)
f.write('%d %d\n'%(fn, 1))
f.write("#Boundary facet list.\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 1 2 3 4\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 5 6 7 8\n")
#xmin side
f.write("2 0 1\n")
f.write("4 1 4 8 5\n")
f.write("2 9 12\n")
#ymin side
f.write("2 0 1\n")
f.write("4 1 2 6 5\n")
f.write("2 9 10\n")
#xmax side
f.write("2 0 1\n")
f.write("4 2 3 7 6\n")
f.write("2 10 11\n")
#ymax side
f.write("2 0 1\n")
f.write("4 3 4 8 7\n")
f.write("2 11 12\n")
f.write("#Facet list of surface1.\n")
for t in triangles1:
f.write("%d %d %d\n"%(1, 0, -1))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Facet list of surface2.\n")
for t in triangles2:
f.write("%d %d %d\n"%(1, 0, -2))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Part 3 - the hole list.\n")
f.write('%d\n'%0)
f.write("#Part 4 - the region list.\n")
f.write('%d\n'%0)
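# Hedged sketch (not part of the original script): a minimal sanity check,
# with a hypothetical name, that the node-list header written above agrees
# with the number of node lines that follow it. Assumes the same
# $HOME/mediansurf.poly path used above.
def check_poly_node_count(path=None):
    path = path or os.environ['HOME'] + '/mediansurf.poly'
    with open(path) as f:
        lines = [l for l in f if not l.startswith('#')]  # drop comment lines
    n_nodes = int(lines[0].split()[0])  # node header: "<n_nodes> 3 0 0"
    assert len(lines) >= 1 + n_nodes, "fewer node lines than the header claims"
    return n_nodes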
if __name__ == "__main__":
surfgen_shared_boundary()
| gpl-3.0 |
broadinstitute/PyGithub | github/GitAuthor.py | 74 | 3048 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
class GitAuthor(github.GithubObject.NonCompletableGithubObject):
"""
This class represents GitAuthors as returned for example by http://developer.github.com/v3/todo
"""
@property
def date(self):
"""
:type: datetime.datetime
"""
return self._date.value
@property
def email(self):
"""
:type: string
"""
return self._email.value
@property
def name(self):
"""
:type: string
"""
return self._name.value
def _initAttributes(self):
self._date = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "date" in attributes: # pragma no branch
self._date = self._makeDatetimeAttribute(attributes["date"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
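# Hedged illustration (not part of PyGithub): _useAttributes consumes the raw
# API payload, e.g. {"date": <datetime value>, "email": "ada@example.com",
# "name": "Ada"}, and sets only the keys that are present; anything absent
# stays github.GithubObject.NotSet.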
| gpl-3.0 |
rbharath/pande-gas | vs_utils/utils/dragon_utils.py | 3 | 5800 | """
Dragon utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from cStringIO import StringIO
import numpy as np
import os
import pandas as pd
import subprocess
import tempfile
from vs_utils.utils import SmilesGenerator
class Dragon(object):
"""
Wrapper for dragon6shell.
Parameters
----------
subset : str, optional (default '2d')
Descriptor subset.
kwargs : dict, optional
Keyword arguments for SmilesGenerator.
"""
def __init__(self, subset='2d', **kwargs):
self.subset = subset
self.initialized = False
self.config_filename, self.smiles_engine = None, None
self.smiles_engine_kwargs = kwargs
def initialize(self):
"""
Initialize.
This is not part of __init__ because it breaks IPython.parallel.
"""
fd, self.config_filename = tempfile.mkstemp()
os.close(fd)
with open(self.config_filename, 'wb') as f:
f.write(self.get_config())
self.smiles_engine = SmilesGenerator(**self.smiles_engine_kwargs)
self.initialized = True
def __del__(self):
"""
Cleanup.
"""
if self.config_filename is not None:
os.unlink(self.config_filename)
def get_config(self):
"""
Get configuration file.
"""
if self.subset == '2d':
return """<?xml version="1.0" encoding="utf-8"?>
<DRAGON version="6.0.36" script_version="1" generation_date="2014/11/17">
<OPTIONS>
<CheckUpdates value="true"/>
<SaveLayout value="true"/>
<ShowWorksheet value="false"/>
<Decimal_Separator value="."/>
<Missing_String value="NaN"/>
<DefaultMolFormat value="1"/>
<HelpBrowser value="/usr/bin/xdg-open"/>
<RejectUnusualValence value="false"/>
<Add2DHydrogens value="false"/>
<MaxSRforAllCircuit value="19"/>
<MaxSR value="35"/>
<MaxSRDetour value="30"/>
<MaxAtomWalkPath value="2000"/>
<LogPathWalk value="true"/>
<LogEdge value="true"/>
<Weights>
<weight name="Mass"/>
<weight name="VdWVolume"/>
<weight name="Electronegativity"/>
<weight name="Polarizability"/>
<weight name="Ionization"/>
<weight name="I-State"/>
</Weights>
<SaveOnlyData value="false"/>
<SaveLabelsOnSeparateFile value="false"/>
<SaveFormatBlock value="%b - %n.txt"/>
<SaveFormatSubBlock value="%b-%s - %n - %m.txt"/>
<SaveExcludeMisVal value="false"/>
<SaveExcludeAllMisVal value="false"/>
<SaveExcludeConst value="false"/>
<SaveExcludeNearConst value="false"/>
<SaveExcludeStdDev value="false"/>
<SaveStdDevThreshold value="0.0001"/>
<SaveExcludeCorrelated value="false"/>
<SaveCorrThreshold value="0.95"/>
<SaveExclusionOptionsToVariables value="false"/>
<SaveExcludeMisMolecules value="false"/>
<SaveExcludeRejectedMolecules value="false"/>
</OPTIONS>
<DESCRIPTORS>
<block id="1" SelectAll="true"/>
<block id="2" SelectAll="true"/>
<block id="3" SelectAll="true"/>
<block id="4" SelectAll="true"/>
<block id="5" SelectAll="true"/>
<block id="6" SelectAll="true"/>
<block id="7" SelectAll="true"/>
<block id="8" SelectAll="true"/>
<block id="9" SelectAll="true"/>
<block id="10" SelectAll="true"/>
<block id="11" SelectAll="true"/>
<block id="12" SelectAll="true"/>
<block id="21" SelectAll="true"/>
<block id="22" SelectAll="true"/>
<block id="23" SelectAll="true"/>
<block id="24" SelectAll="true"/>
<block id="25" SelectAll="true"/>
<block id="28" SelectAll="true"/>
<block id="29" SelectAll="true"/>
</DESCRIPTORS>
<MOLFILES>
<molInput value="stdin"/>
<molInputFormat value="SMILES"/>
</MOLFILES>
<OUTPUT>
<SaveStdOut value="true"/>
<SaveProject value="false"/>
<SaveFile value="false"/>
<logMode value="stderr"/>
</OUTPUT>
</DRAGON>
"""
else:
raise NotImplementedError
def get_descriptors(self, mols):
"""
Parameters
----------
mols : array_like
Molecules.
"""
if not self.initialized:
self.initialize()
smiles = [self.smiles_engine.get_smiles(mol) for mol in mols]
args = ['dragon6shell', '-s', self.config_filename]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate('\n'.join(smiles))
if not stdout:
raise RuntimeError(stderr)
data, names = self.parse_descriptors(stdout)
# adjust for skipped molecules
# descriptors are in same order as smiles
missing = np.setdiff1d(smiles, names)
features = np.zeros(len(smiles), dtype=object)
idx = 0 # index into calculated features
for i, this_smiles in enumerate(smiles):
if this_smiles in missing:
features[i] = None
else:
assert this_smiles == names[idx] # confirm match
features[i] = data[idx]
idx += 1
assert len(features) == len(mols)
return features
def parse_descriptors(self, string):
"""
Parse Dragon descriptors.
Parameters
----------
string : str
Output from dragon6shell.
"""
df = pd.read_table(StringIO(string))
if self.subset == '2d':
del df['nHBonds'], df['Psi_e_1d'], df['Psi_e_1s']
# extract names
names = df['NAME'].values
# delete No. and NAME columns
del df['No.'], df['NAME']
return np.asarray(df, dtype=float), names
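# Hedged illustration (not part of vs_utils): the skipped-molecule alignment
# performed inside get_descriptors, restated on plain sequences. Rows stay in
# input order and molecules Dragon dropped map to None.
def _align_features(smiles, names, data):
    missing = np.setdiff1d(smiles, names)  # SMILES with no calculated row
    features = np.zeros(len(smiles), dtype=object)
    idx = 0  # index into calculated rows
    for i, this_smiles in enumerate(smiles):
        if this_smiles in missing:
            features[i] = None
        else:
            assert this_smiles == names[idx]  # confirm match
            features[i] = data[idx]
            idx += 1
    return features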
| bsd-3-clause |
alex/kombu-fernet-serializers | setup.py | 1 | 1648 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import kombu_fernet
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'kombu_fernet',
'kombu_fernet.serializers',
]
requires = [
'anyjson>=0.3.3',
'kombu>=3.0.16',
]
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='kombu-fernet-serializers',
version=kombu_fernet.__version__,
description='Symmetrically encrypted serializers for Kombu',
long_description=readme,
author='David Gouldin',
author_email='dgouldin@heroku.com',
url='https://github.com/heroku/kombu-fernet-serializers',
packages=packages,
package_data={'': ['LICENSE']},
include_package_data=True,
install_requires=requires,
license=license,
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
entry_points={
'kombu.serializers': [
'fernet_json = kombu_fernet.serializers.json:register_args',
'fernet_yaml = kombu_fernet.serializers.yaml:register_args',
'fernet_pickle = kombu_fernet.serializers.pickle:register_args',
'fernet_msgpack = kombu_fernet.serializers.msgpack:register_args',
]
}
)
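# Hedged usage sketch (not part of this setup.py; assumes Kombu picks up the
# 'kombu.serializers' entry points registered above):
#
#   from kombu import Connection
#   with Connection('memory://') as conn:
#       producer = conn.Producer(serializer='fernet_json')
#       producer.publish({'n': 1})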
| mit |
elingg/tensorflow | tensorflow/python/ops/check_ops.py | 16 | 29893 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Asserts and Boolean Checks
@@assert_negative
@@assert_positive
@@assert_proper_iterable
@@assert_non_negative
@@assert_non_positive
@@assert_equal
@@assert_integer
@@assert_less
@@assert_less_equal
@@assert_greater
@@assert_greater_equal
@@assert_rank
@@assert_rank_at_least
@@assert_type
@@is_non_decreasing
@@is_numeric_tensor
@@is_strictly_increasing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
NUMERIC_TYPES = frozenset(
[dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
dtypes.complex64])
__all__ = [
'assert_negative',
'assert_positive',
'assert_proper_iterable',
'assert_non_negative',
'assert_non_positive',
'assert_equal',
'assert_integer',
'assert_less',
'assert_less_equal',
'assert_greater',
'assert_greater_equal',
'assert_rank',
'assert_rank_at_least',
'assert_rank_in',
'assert_type',
'is_non_decreasing',
'is_numeric_tensor',
'is_strictly_increasing',
]
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
Useful since `Tensor`, `ndarray`, byte/text types are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
+ compat.bytes_or_text_types
)
if isinstance(values, unintentional_iterables):
raise TypeError(
'Expected argument "values" to be a "proper" iterable. Found: %s' %
type(values))
if not hasattr(values, '__iter__'):
raise TypeError(
'Expected argument "values" to be iterable. Found: %s' % type(values))
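# Illustrative behavior (not in the original module): a plain list such as
# [tensor_a, tensor_b] passes, while a lone `Tensor`, `ndarray`, or str/bytes
# raises TypeError even though Python can iterate over each of them.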
def assert_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_negative(x)]):
output = tf.reduce_sum(x)
```
Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message, 'Condition x < 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(x, zero, data=data, summarize=summarize)
def assert_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_positive(x)]):
output = tf.reduce_sum(x)
```
Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message, 'Condition x > 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(zero, x, data=data, summarize=summarize)
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x >= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_negative(x)]):
output = tf.reduce_sum(x)
```
Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x >= 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(zero, x, data=data, summarize=summarize)
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_positive(x)]):
output = tf.reduce_sum(x)
```
Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x <= 0 did not hold element-wise: x = ', x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(x, zero, data=data, summarize=summarize)
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x == y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] == y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_equal".
Returns:
Op that raises `InvalidArgumentError` if `x == y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x == y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] < y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less".
Returns:
Op that raises `InvalidArgumentError` if `x < y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x < y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.less(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] <= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less_equal"
Returns:
Op that raises `InvalidArgumentError` if `x <= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x <= y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.less_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] > y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_greater".
Returns:
Op that raises `InvalidArgumentError` if `x > y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x > y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.greater(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
name=None):
"""Assert the condition `x >= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] >= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to
"assert_greater_equal"
Returns:
Op that raises `InvalidArgumentError` if `x >= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x >= y did not hold element-wise: x = ', x.name, x, 'y = ',
y.name, y
]
condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def _assert_rank_condition(
x, rank, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
static_condition: A python function that takes `[actual_rank, given_rank]`
and returns `True` if the condition is satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_rank]
and returns `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
assert_type(rank, dtypes.int32)
# Attempt to statically determine the rank.
rank_static = tensor_util.constant_value(rank)
if rank_static is not None:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, rank_static):
raise ValueError(
'Static rank condition failed', x_rank_static, rank_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), rank)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar integer `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_rank".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
dynamic_condition = math_ops.equal
if data is None:
data = [
message,
'Tensor %s must have rank' % x.name, rank, 'Received shape: ',
array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
(message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def assert_rank_at_least(
x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank` or higher.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_least".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(
name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
dynamic_condition = math_ops.greater_equal
if data is None:
data = [
message,
'Tensor %s must have rank at least' % x.name, rank,
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank at least %d. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
if len(given_ranks) < 1:
return ops.convert_to_tensor(False)
result = math_ops.equal(given_ranks[0], actual_rank)
for given_rank in given_ranks[1:]:
result = math_ops.logical_or(
result, math_ops.equal(given_rank, actual_rank))
return result
def _assert_ranks_condition(
x, ranks, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
ranks: Scalar `Tensor`.
static_condition: A python function that takes
`[actual_rank, given_ranks]` and returns `True` if the condition is
satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_ranks]
and returns `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
for rank in ranks:
assert_type(rank, dtypes.int32)
# Attempt to statically determine the ranks.
ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
if None not in ranks_static:
for rank_static in ranks_static:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, ranks_static):
raise ValueError(
'Static rank condition failed', x_rank_static, ranks_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), ranks)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
for rank, rank_static in zip(ranks, ranks_static):
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank_in(
x, ranks, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank in `ranks`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_in(x, (2, 4))]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
ranks: Iterable of scalar `Tensor` objects.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_in".
Returns:
Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
If static checks determine `x` has matching rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has mismatched rank.
"""
with ops.name_scope(
name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
message = message or ''
if data is None:
data = [
message, 'Tensor %s must have rank in' % x.name
] + list(ranks) + [
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
_dynamic_rank_in, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank in %s. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def assert_integer(x, message=None, name=None):
"""Assert that `x` is of integer dtype.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_integer(x)]):
output = tf.reduce_sum(x)
```
Args:
x: `Tensor` whose basetype is integer and is not quantized.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_integer".
Raises:
TypeError: If `x.dtype` is anything other than non-quantized integer.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_integer', [x]):
x = ops.convert_to_tensor(x, name='x')
if not x.dtype.is_integer:
err_msg = (
'%s Expected "x" to be integer type. Found: %s of dtype %s'
% (message, x.name, x.dtype))
raise TypeError(err_msg)
return control_flow_ops.no_op('statically_determined_was_integer')
def assert_type(tensor, tf_type, message=None, name=None):
"""Statically asserts that the given `Tensor` is of the specified type.
Args:
tensor: A tensorflow `Tensor`.
tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
etc).
message: A string to prefix to the default message.
name: A name to give this `Op`. Defaults to "assert_type"
Raises:
TypeError: If the tensors data type doesn't match `tf_type`.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_type', [tensor]):
tensor = ops.convert_to_tensor(tensor, name='tensor')
if tensor.dtype != tf_type:
raise TypeError(
'%s %s must be of type %s' % (message, tensor.op.name, tf_type))
return control_flow_ops.no_op('statically_determined_correct_type')
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
"""Gets the difference x[1:] - x[:-1]."""
x = array_ops.reshape(x, [-1])
if not is_numeric_tensor(x):
raise TypeError('Expected x to be numeric, instead found: %s' % x)
# If x has less than 2 elements, there is nothing to compare. So return [].
is_shorter_than_two = math_ops.less(array_ops.size(x), 2)
short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)
# With 2 or more elements, return x[1:] - x[:-1]
s_len = array_ops.shape(x) - 1
diff = lambda: array_ops.strided_slice(x, [1], [1] + s_len) - array_ops.strided_slice(x, [0], s_len)
return control_flow_ops.cond(is_shorter_than_two, short_result, diff)
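# Worked example (illustrative, not part of the original module):
#   x = [1, 2, 2] -> diff = [1, 0]: non-decreasing holds, strictly
#   increasing does not. x = [3] -> diff = [], and reduce_all([]) is True,
#   so both properties hold trivially.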
def is_numeric_tensor(tensor):
return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES
def is_non_decreasing(x, name=None):
"""Returns `True` if `x` is non-decreasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
If `x` has less than two elements, it is trivially non-decreasing.
See also: `is_strictly_increasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional). Defaults to "is_non_decreasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_non_decreasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less_equal(zero, diff))
def is_strictly_increasing(x, name=None):
"""Returns `True` if `x` is strictly increasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
If `x` has less than two elements, it is trivially strictly increasing.
See also: `is_non_decreasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional).
Defaults to "is_strictly_increasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_strictly_increasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less(zero, diff))
| apache-2.0 |
drchrono/salesforce-python-toolkit | sforce/enterprise.py | 19 | 3596 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Written by: David Lanstein ( dlanstein gmail com )
from base import SforceBaseClient
import suds.sudsobject
class SforceEnterpriseClient(SforceBaseClient):
def __init__(self, wsdl, **kwargs):
super(SforceEnterpriseClient, self).__init__(wsdl, **kwargs)
# Core calls
def convertLead(self, leadConverts):
xml = self._marshallSObjects(leadConverts)
return super(SforceEnterpriseClient, self).convertLead(xml)
def create(self, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).create(xml)
def merge(self, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).merge(xml)
def process(self, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).process(xml)
def retrieve(self, fieldList, sObjectType, ids):
'''
Currently, this uses query() to emulate the retrieve() functionality, as suds' unmarshaller
borks on the sf: prefix that Salesforce prepends to all fields other than Id and type (any
fields not defined in the 'sObject' section of the Enterprise WSDL)
'''
# HACK HACK HACKITY HACK
if not isinstance(ids, (list, tuple)):
ids = (ids, )
# The only way to make sure we return objects in the correct order, and return None where an
# object can't be retrieved by Id, is to query each ID individually
sObjects = []
for id in ids:
queryString = 'SELECT Id, ' + fieldList + ' FROM ' + sObjectType + ' WHERE Id = \'' + id + '\' LIMIT 1'
queryResult = self.query(queryString)
if queryResult.size == 0:
sObjects.append(None)
continue
# There will be exactly one record in queryResult.records[] at this point
record = queryResult.records[0]
sObject = self.generateObject(sObjectType)
for (k, v) in record:
setattr(sObject, k, v)
sObjects.append(sObject)
return self._handleResultTyping(sObjects)
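# Hedged usage sketch (identifiers illustrative, not part of the toolkit):
# given an authenticated client h, the query()-based emulation above runs as
#   contacts = h.retrieve('FirstName, LastName', 'Contact', [id1, id2])
# and yields one sObject per Id, with None for any Id that does not resolve.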
def search(self, searchString):
searchResult = super(SforceEnterpriseClient, self).search(searchString)
# HACK <result/> gets unmarshalled as '' instead of an empty SearchResult
# return an empty SearchResult instead
if searchResult == '':
return self._sforce.factory.create('SearchResult')
return searchResult
def update(self, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).update(xml)
def upsert(self, externalIdFieldName, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).upsert(externalIdFieldName, xml)
# Utility calls
def sendEmail(self, sObjects):
xml = self._marshallSObjects(sObjects)
return super(SforceEnterpriseClient, self).sendEmail(xml)
| lgpl-3.0 |
fhaoquan/kbengine | kbe/src/lib/python/Lib/test/test_zlib.py | 84 | 26766 | import unittest
from test import support
import binascii
import random
import sys
from test.support import bigmemtest, _1G, _4G
zlib = support.import_module('zlib')
requires_Compress_copy = unittest.skipUnless(
hasattr(zlib.compressobj(), "copy"),
'requires Compress.copy()')
requires_Decompress_copy = unittest.skipUnless(
hasattr(zlib.decompressobj(), "copy"),
'requires Decompress.copy()')
class VersionTestCase(unittest.TestCase):
def test_library_version(self):
# Test that the major version of the actual library in use matches the
# major version that we were compiled against. We can't guarantee that
# the minor versions will match (even on the machine on which the module
# was compiled), and the API is stable between minor versions, so
# testing only the major versions avoids spurious failures.
self.assertEqual(zlib.ZLIB_RUNTIME_VERSION[0], zlib.ZLIB_VERSION[0])
class ChecksumTestCase(unittest.TestCase):
# checksum test cases
def test_crc32start(self):
self.assertEqual(zlib.crc32(b""), zlib.crc32(b"", 0))
self.assertTrue(zlib.crc32(b"abc", 0xffffffff))
def test_crc32empty(self):
self.assertEqual(zlib.crc32(b"", 0), 0)
self.assertEqual(zlib.crc32(b"", 1), 1)
self.assertEqual(zlib.crc32(b"", 432), 432)
def test_adler32start(self):
self.assertEqual(zlib.adler32(b""), zlib.adler32(b"", 1))
self.assertTrue(zlib.adler32(b"abc", 0xffffffff))
def test_adler32empty(self):
self.assertEqual(zlib.adler32(b"", 0), 0)
self.assertEqual(zlib.adler32(b"", 1), 1)
self.assertEqual(zlib.adler32(b"", 432), 432)
def assertEqual32(self, seen, expected):
# 32-bit values masked -- checksums on 32- vs 64- bit machines
# This is important if bit 31 (0x80000000) is set.
self.assertEqual(seen & 0x0FFFFFFFF, expected & 0x0FFFFFFFF)
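# Worked example (illustrative): (-1) & 0x0FFFFFFFF == 4294967295 ==
# 0xFFFFFFFF, so a platform returning the signed value -1 and one returning
# the unsigned 4294967295 compare equal after masking.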
def test_penguins(self):
self.assertEqual32(zlib.crc32(b"penguin", 0), 0x0e5c1a120)
self.assertEqual32(zlib.crc32(b"penguin", 1), 0x43b6aa94)
self.assertEqual32(zlib.adler32(b"penguin", 0), 0x0bcf02f6)
self.assertEqual32(zlib.adler32(b"penguin", 1), 0x0bd602f7)
self.assertEqual(zlib.crc32(b"penguin"), zlib.crc32(b"penguin", 0))
self.assertEqual(zlib.adler32(b"penguin"),zlib.adler32(b"penguin",1))
def test_crc32_adler32_unsigned(self):
foo = b'abcdefghijklmnop'
# explicitly test signed behavior
self.assertEqual(zlib.crc32(foo), 2486878355)
self.assertEqual(zlib.crc32(b'spam'), 1138425661)
self.assertEqual(zlib.adler32(foo+foo), 3573550353)
self.assertEqual(zlib.adler32(b'spam'), 72286642)
def test_same_as_binascii_crc32(self):
foo = b'abcdefghijklmnop'
crc = 2486878355
self.assertEqual(binascii.crc32(foo), crc)
self.assertEqual(zlib.crc32(foo), crc)
self.assertEqual(binascii.crc32(b'spam'), zlib.crc32(b'spam'))
# Issue #10276 - check that inputs >=4GB are handled correctly.
class ChecksumBigBufferTestCase(unittest.TestCase):
@bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
def test_big_buffer(self, size):
data = b"nyan" * (_1G + 1)
self.assertEqual(zlib.crc32(data), 1044521549)
self.assertEqual(zlib.adler32(data), 2256789997)
class ExceptionTestCase(unittest.TestCase):
# make sure we generate some expected errors
def test_badlevel(self):
# specifying compression level out of range causes an error
# (but -1 is Z_DEFAULT_COMPRESSION and apparently the zlib
# accepts 0 too)
self.assertRaises(zlib.error, zlib.compress, b'ERROR', 10)
def test_badargs(self):
self.assertRaises(TypeError, zlib.adler32)
self.assertRaises(TypeError, zlib.crc32)
self.assertRaises(TypeError, zlib.compress)
self.assertRaises(TypeError, zlib.decompress)
for arg in (42, None, '', 'abc', (), []):
self.assertRaises(TypeError, zlib.adler32, arg)
self.assertRaises(TypeError, zlib.crc32, arg)
self.assertRaises(TypeError, zlib.compress, arg)
self.assertRaises(TypeError, zlib.decompress, arg)
def test_badcompressobj(self):
# verify failure on building compress object with bad params
self.assertRaises(ValueError, zlib.compressobj, 1, zlib.DEFLATED, 0)
# specifying total bits too large causes an error
self.assertRaises(ValueError,
zlib.compressobj, 1, zlib.DEFLATED, zlib.MAX_WBITS + 1)
def test_baddecompressobj(self):
# verify failure on building decompress object with bad params
self.assertRaises(ValueError, zlib.decompressobj, -1)
def test_decompressobj_badflush(self):
# verify failure on calling decompressobj.flush with bad params
self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
self.assertRaises(ValueError, zlib.decompressobj().flush, -1)
class BaseCompressTestCase(object):
def check_big_compress_buffer(self, size, compress_func):
_1M = 1024 * 1024
fmt = "%%0%dx" % (2 * _1M)
# Generate 10MB worth of random, and expand it by repeating it.
# The assumption is that zlib's memory is not big enough to exploit
# such spread out redundancy.
data = b''.join([random.getrandbits(8 * _1M).to_bytes(_1M, 'little')
for i in range(10)])
data = data * (size // len(data) + 1)
try:
compress_func(data)
finally:
# Release memory
data = None
def check_big_decompress_buffer(self, size, decompress_func):
data = b'x' * size
try:
compressed = zlib.compress(data, 1)
finally:
# Release memory
data = None
data = decompress_func(compressed)
# Sanity check
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b'x')), 0)
finally:
data = None
class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression in one go (whole message compression)
def test_speech(self):
x = zlib.compress(HAMLET_SCENE)
self.assertEqual(zlib.decompress(x), HAMLET_SCENE)
def test_speech128(self):
# compress more data
data = HAMLET_SCENE * 128
x = zlib.compress(data)
self.assertEqual(zlib.compress(bytearray(data)), x)
for ob in x, bytearray(x):
self.assertEqual(zlib.decompress(ob), data)
def test_incomplete_stream(self):
# A useful error message is given
x = zlib.compress(HAMLET_SCENE)
self.assertRaisesRegex(zlib.error,
"Error -5 while decompressing data: incomplete or truncated stream",
zlib.decompress, x[:-1])
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
compress = lambda s: zlib.compress(s, 1)
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
self.check_big_decompress_buffer(size, zlib.decompress)
@bigmemtest(size=_4G + 100, memuse=1, dry_run=False)
def test_length_overflow(self, size):
data = b'x' * size
try:
self.assertRaises(OverflowError, zlib.compress, data, 1)
self.assertRaises(OverflowError, zlib.decompress, data)
finally:
data = None
class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
# Test compression object
def test_pair(self):
# straightforward compress/decompress objects
datasrc = HAMLET_SCENE * 128
datazip = zlib.compress(datasrc)
# should compress both bytes and bytearray data
for data in (datasrc, bytearray(datasrc)):
co = zlib.compressobj()
x1 = co.compress(data)
x2 = co.flush()
self.assertRaises(zlib.error, co.flush) # second flush should not work
self.assertEqual(x1 + x2, datazip)
for v1, v2 in ((x1, x2), (bytearray(x1), bytearray(x2))):
dco = zlib.decompressobj()
y1 = dco.decompress(v1 + v2)
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
self.assertIsInstance(dco.unconsumed_tail, bytes)
self.assertIsInstance(dco.unused_data, bytes)
def test_compressoptions(self):
# specify lots of options to compressobj()
level = 2
method = zlib.DEFLATED
wbits = -12
memlevel = 9
strategy = zlib.Z_FILTERED
co = zlib.compressobj(level, method, wbits, memlevel, strategy)
x1 = co.compress(HAMLET_SCENE)
x2 = co.flush()
dco = zlib.decompressobj(wbits)
y1 = dco.decompress(x1 + x2)
y2 = dco.flush()
self.assertEqual(HAMLET_SCENE, y1 + y2)
def test_compressincremental(self):
# compress object in steps, decompress object as one-shot
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
dco = zlib.decompressobj()
y1 = dco.decompress(b''.join(bufs))
y2 = dco.flush()
self.assertEqual(data, y1 + y2)
def test_decompinc(self, flush=False, source=None, cx=256, dcx=64):
# compress object in steps, decompress object in steps
source = source or HAMLET_SCENE
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
decombuf = zlib.decompress(combuf)
# Test type of return value
self.assertIsInstance(decombuf, bytes)
self.assertEqual(data, decombuf)
dco = zlib.decompressobj()
bufs = []
for i in range(0, len(combuf), dcx):
bufs.append(dco.decompress(combuf[i:i+dcx]))
self.assertEqual(b'', dco.unconsumed_tail, ########
"(A) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
if flush:
bufs.append(dco.flush())
else:
while True:
chunk = dco.decompress(b'')
if chunk:
bufs.append(chunk)
else:
break
self.assertEqual(b'', dco.unconsumed_tail, ########
"(B) uct should be b'': not %d long" %
len(dco.unconsumed_tail))
self.assertEqual(b'', dco.unused_data)
self.assertEqual(data, b''.join(bufs))
# Failure means: "decompressobj with init options failed"
def test_decompincflush(self):
self.test_decompinc(flush=True)
def test_decompimax(self, source=None, cx=256, dcx=64):
# compress in steps, decompress in length-restricted steps
source = source or HAMLET_SCENE
# Check a decompression object with max_length specified
data = source * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), cx):
bufs.append(co.compress(data[i:i+cx]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
#max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, dcx)
self.assertFalse(len(chunk) > dcx,
'chunk too big (%d>%d)' % (len(chunk), dcx))
bufs.append(chunk)
cb = dco.unconsumed_tail
bufs.append(dco.flush())
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlen(self, flush=False):
# Check a decompression object with max_length specified
data = HAMLET_SCENE * 128
co = zlib.compressobj()
bufs = []
for i in range(0, len(data), 256):
bufs.append(co.compress(data[i:i+256]))
bufs.append(co.flush())
combuf = b''.join(bufs)
self.assertEqual(data, zlib.decompress(combuf),
'compressed data failure')
dco = zlib.decompressobj()
bufs = []
cb = combuf
while cb:
max_length = 1 + len(cb)//10
chunk = dco.decompress(cb, max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
cb = dco.unconsumed_tail
if flush:
bufs.append(dco.flush())
else:
while chunk:
chunk = dco.decompress(b'', max_length)
self.assertFalse(len(chunk) > max_length,
'chunk too big (%d>%d)' % (len(chunk),max_length))
bufs.append(chunk)
self.assertEqual(data, b''.join(bufs), 'Wrong data retrieved')
def test_decompressmaxlenflush(self):
self.test_decompressmaxlen(flush=True)
def test_maxlenmisc(self):
# Misc tests of max_length
dco = zlib.decompressobj()
self.assertRaises(ValueError, dco.decompress, b"", -1)
self.assertEqual(b'', dco.unconsumed_tail)
def test_clear_unconsumed_tail(self):
# Issue #12050: calling decompress() without providing max_length
# should clear the unconsumed_tail attribute.
cdata = b"x\x9cKLJ\x06\x00\x02M\x01" # "abc"
dco = zlib.decompressobj()
ddata = dco.decompress(cdata, 1)
ddata += dco.decompress(dco.unconsumed_tail)
self.assertEqual(dco.unconsumed_tail, b"")
def test_flushes(self):
# Test flush() with the various options, using all the
# different levels in order to provide more variations.
sync_opt = ['Z_NO_FLUSH', 'Z_SYNC_FLUSH', 'Z_FULL_FLUSH']
sync_opt = [getattr(zlib, opt) for opt in sync_opt
if hasattr(zlib, opt)]
data = HAMLET_SCENE * 8
for sync in sync_opt:
for level in range(10):
obj = zlib.compressobj( level )
a = obj.compress( data[:3000] )
b = obj.flush( sync )
c = obj.compress( data[3000:] )
d = obj.flush()
self.assertEqual(zlib.decompress(b''.join([a,b,c,d])),
data, ("Decompress failed: flush "
"mode=%i, level=%i") % (sync, level))
del obj
@unittest.skipUnless(hasattr(zlib, 'Z_SYNC_FLUSH'),
'requires zlib.Z_SYNC_FLUSH')
def test_odd_flush(self):
# Test for odd flushing bugs noted in 2.0, and hopefully fixed in 2.1
import random
# Testing on 17K of "random" data
# Create compressor and decompressor objects
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
dco = zlib.decompressobj()
# Try 17K of data
# generate random data stream
try:
# In 2.3 and later, WichmannHill is the RNG of the bug report
gen = random.WichmannHill()
except AttributeError:
try:
# 2.2 called it Random
gen = random.Random()
except AttributeError:
# others might simply have a single RNG
gen = random
gen.seed(1)
data = genblock(1, 17 * 1024, generator=gen)
# compress, sync-flush, and decompress
first = co.compress(data)
second = co.flush(zlib.Z_SYNC_FLUSH)
expanded = dco.decompress(first + second)
# if decompressed data is different from the input data, choke.
self.assertEqual(expanded, data, "17K random source doesn't match")
def test_empty_flush(self):
# Test that calling .flush() on unused objects works.
# (Bug #1083110 -- calling .flush() on decompress objects
# caused a core dump.)
co = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
self.assertTrue(co.flush()) # Returns a zlib header
dco = zlib.decompressobj()
self.assertEqual(dco.flush(), b"") # Returns nothing
def test_dictionary(self):
h = HAMLET_SCENE
# Build a simulated dictionary out of the words in HAMLET.
words = h.split()
random.shuffle(words)
zdict = b''.join(words)
# Use it to compress HAMLET.
co = zlib.compressobj(zdict=zdict)
cd = co.compress(h) + co.flush()
# Verify that it will decompress with the dictionary.
dco = zlib.decompressobj(zdict=zdict)
self.assertEqual(dco.decompress(cd) + dco.flush(), h)
# Verify that it fails when not given the dictionary.
dco = zlib.decompressobj()
self.assertRaises(zlib.error, dco.decompress, cd)
def test_dictionary_streaming(self):
# This simulates the reuse of a compressor object for compressing
# several separate data streams.
co = zlib.compressobj(zdict=HAMLET_SCENE)
do = zlib.decompressobj(zdict=HAMLET_SCENE)
piece = HAMLET_SCENE[1000:1500]
d0 = co.compress(piece) + co.flush(zlib.Z_SYNC_FLUSH)
d1 = co.compress(piece[100:]) + co.flush(zlib.Z_SYNC_FLUSH)
d2 = co.compress(piece[:-100]) + co.flush(zlib.Z_SYNC_FLUSH)
self.assertEqual(do.decompress(d0), piece)
self.assertEqual(do.decompress(d1), piece[100:])
self.assertEqual(do.decompress(d2), piece[:-100])
def test_decompress_incomplete_stream(self):
# This is 'foo', deflated
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E'
# For the record
self.assertEqual(zlib.decompress(x), b'foo')
self.assertRaises(zlib.error, zlib.decompress, x[:-5])
# Omitting the stream end works with decompressor objects
# (see issue #8672).
dco = zlib.decompressobj()
y = dco.decompress(x[:-5])
y += dco.flush()
self.assertEqual(y, b'foo')
def test_decompress_eof(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.decompress(x[-5:])
self.assertTrue(dco.eof)
dco.flush()
self.assertTrue(dco.eof)
def test_decompress_eof_incomplete_stream(self):
x = b'x\x9cK\xcb\xcf\x07\x00\x02\x82\x01E' # 'foo'
dco = zlib.decompressobj()
self.assertFalse(dco.eof)
dco.decompress(x[:-5])
self.assertFalse(dco.eof)
dco.flush()
self.assertFalse(dco.eof)
def test_decompress_unused_data(self):
# Repeated calls to decompress() after EOF should accumulate data in
# dco.unused_data, instead of just storing the arg to the last call.
source = b'abcdefghijklmnopqrstuvwxyz'
remainder = b'0123456789'
y = zlib.compress(source)
x = y + remainder
for maxlen in 0, 1000:
for step in 1, 2, len(y), len(x):
dco = zlib.decompressobj()
data = b''
for i in range(0, len(x), step):
if i < len(y):
self.assertEqual(dco.unused_data, b'')
if maxlen == 0:
data += dco.decompress(x[i : i + step])
self.assertEqual(dco.unconsumed_tail, b'')
else:
data += dco.decompress(
dco.unconsumed_tail + x[i : i + step], maxlen)
data += dco.flush()
self.assertTrue(dco.eof)
self.assertEqual(data, source)
self.assertEqual(dco.unconsumed_tail, b'')
self.assertEqual(dco.unused_data, remainder)
def test_flush_with_freed_input(self):
# Issue #16411: decompressor accesses input to last decompress() call
# in flush(), even if this object has been freed in the meantime.
input1 = b'abcdefghijklmnopqrstuvwxyz'
input2 = b'QWERTYUIOPASDFGHJKLZXCVBNM'
data = zlib.compress(input1)
dco = zlib.decompressobj()
dco.decompress(data, 1)
del data
data = zlib.compress(input2)
self.assertEqual(dco.flush(), input1[1:])
@requires_Compress_copy
def test_compresscopy(self):
# Test copying a compression object
data0 = HAMLET_SCENE
data1 = bytes(str(HAMLET_SCENE, "ascii").swapcase(), "ascii")
c0 = zlib.compressobj(zlib.Z_BEST_COMPRESSION)
bufs0 = []
bufs0.append(c0.compress(data0))
c1 = c0.copy()
bufs1 = bufs0[:]
bufs0.append(c0.compress(data0))
bufs0.append(c0.flush())
s0 = b''.join(bufs0)
bufs1.append(c1.compress(data1))
bufs1.append(c1.flush())
s1 = b''.join(bufs1)
self.assertEqual(zlib.decompress(s0),data0+data0)
self.assertEqual(zlib.decompress(s1),data0+data1)
@requires_Compress_copy
def test_badcompresscopy(self):
# Test copying a compression object in an inconsistent state
c = zlib.compressobj()
c.compress(HAMLET_SCENE)
c.flush()
self.assertRaises(ValueError, c.copy)
@requires_Decompress_copy
def test_decompresscopy(self):
# Test copying a decompression object
data = HAMLET_SCENE
comp = zlib.compress(data)
# Test type of return value
self.assertIsInstance(comp, bytes)
d0 = zlib.decompressobj()
bufs0 = []
bufs0.append(d0.decompress(comp[:32]))
d1 = d0.copy()
bufs1 = bufs0[:]
bufs0.append(d0.decompress(comp[32:]))
s0 = b''.join(bufs0)
bufs1.append(d1.decompress(comp[32:]))
s1 = b''.join(bufs1)
self.assertEqual(s0,s1)
self.assertEqual(s0,data)
@requires_Decompress_copy
def test_baddecompresscopy(self):
# Test copying a decompression object in an inconsistent state
data = zlib.compress(HAMLET_SCENE)
d = zlib.decompressobj()
d.decompress(data)
d.flush()
self.assertRaises(ValueError, d.copy)
# Memory use of the following functions takes into account overallocation
@bigmemtest(size=_1G + 1024 * 1024, memuse=3)
def test_big_compress_buffer(self, size):
c = zlib.compressobj(1)
compress = lambda s: c.compress(s) + c.flush()
self.check_big_compress_buffer(size, compress)
@bigmemtest(size=_1G + 1024 * 1024, memuse=2)
def test_big_decompress_buffer(self, size):
d = zlib.decompressobj()
decompress = lambda s: d.decompress(s) + d.flush()
self.check_big_decompress_buffer(size, decompress)
@bigmemtest(size=_4G + 100, memuse=1, dry_run=False)
def test_length_overflow(self, size):
data = b'x' * size
c = zlib.compressobj(1)
d = zlib.decompressobj()
try:
self.assertRaises(OverflowError, c.compress, data)
self.assertRaises(OverflowError, d.decompress, data)
finally:
data = None
def genblock(seed, length, step=1024, generator=random):
"""length-byte stream of random data from a seed (in step-byte blocks)."""
if seed is not None:
generator.seed(seed)
randint = generator.randint
if length < step or step < 2:
step = length
blocks = bytes()
for i in range(0, length, step):
blocks += bytes(randint(0, 255) for x in range(step))
return blocks
def choose_lines(source, number, seed=None, generator=random):
"""Return a list of number lines randomly chosen from the source"""
if seed is not None:
generator.seed(seed)
sources = source.split('\n')
return [generator.choice(sources) for n in range(number)]
HAMLET_SCENE = b"""
LAERTES
O, fear me not.
I stay too long: but here my father comes.
Enter POLONIUS
A double blessing is a double grace,
Occasion smiles upon a second leave.
LORD POLONIUS
Yet here, Laertes! aboard, aboard, for shame!
The wind sits in the shoulder of your sail,
And you are stay'd for. There; my blessing with thee!
And these few precepts in thy memory
See thou character. Give thy thoughts no tongue,
Nor any unproportioned thought his act.
Be thou familiar, but by no means vulgar.
Those friends thou hast, and their adoption tried,
Grapple them to thy soul with hoops of steel;
But do not dull thy palm with entertainment
Of each new-hatch'd, unfledged comrade. Beware
Of entrance to a quarrel, but being in,
Bear't that the opposed may beware of thee.
Give every man thy ear, but few thy voice;
Take each man's censure, but reserve thy judgment.
Costly thy habit as thy purse can buy,
But not express'd in fancy; rich, not gaudy;
For the apparel oft proclaims the man,
And they in France of the best rank and station
Are of a most select and generous chief in that.
Neither a borrower nor a lender be;
For loan oft loses both itself and friend,
And borrowing dulls the edge of husbandry.
This above all: to thine ownself be true,
And it must follow, as the night the day,
Thou canst not then be false to any man.
Farewell: my blessing season this in thee!
LAERTES
Most humbly do I take my leave, my lord.
LORD POLONIUS
The time invites you; go; your servants tend.
LAERTES
Farewell, Ophelia; and remember well
What I have said to you.
OPHELIA
'Tis in my memory lock'd,
And you yourself shall keep the key of it.
LAERTES
Farewell.
"""
def test_main():
support.run_unittest(
VersionTestCase,
ChecksumTestCase,
ChecksumBigBufferTestCase,
ExceptionTestCase,
CompressTestCase,
CompressObjectTestCase
)
if __name__ == "__main__":
unittest.main() # XXX
###test_main()
| lgpl-3.0 |
J861449197/edx-platform | common/djangoapps/cors_csrf/tests/test_views.py | 150 | 2397 | """Tests for cross-domain request views. """
import json
from django.test import TestCase
from django.core.urlresolvers import reverse, NoReverseMatch
import ddt
from config_models.models import cache
from cors_csrf.models import XDomainProxyConfiguration
@ddt.ddt
class XDomainProxyTest(TestCase):
"""Tests for the xdomain proxy end-point. """
def setUp(self):
"""Clear model-based config cache. """
super(XDomainProxyTest, self).setUp()
try:
self.url = reverse('xdomain_proxy')
except NoReverseMatch:
self.skipTest('xdomain_proxy URL is not configured')
cache.clear()
def test_xdomain_proxy_disabled(self):
self._configure(False)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(None, [' '], [' ', ' '])
def test_xdomain_proxy_enabled_no_whitelist(self, whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self.assertEqual(response.status_code, 404)
@ddt.data(
(['example.com'], ['example.com']),
(['example.com', 'sub.example.com'], ['example.com', 'sub.example.com']),
([' example.com '], ['example.com']),
([' ', 'example.com'], ['example.com']),
)
@ddt.unpack
def test_xdomain_proxy_enabled_with_whitelist(self, whitelist, expected_whitelist):
self._configure(True, whitelist=whitelist)
response = self._load_page()
self._check_whitelist(response, expected_whitelist)
def _configure(self, is_enabled, whitelist=None):
"""Enable or disable the end-point and configure the whitelist. """
config = XDomainProxyConfiguration.current()
config.enabled = is_enabled
if whitelist:
config.whitelist = "\n".join(whitelist)
config.save()
cache.clear()
def _load_page(self):
"""Load the end-point. """
return self.client.get(reverse('xdomain_proxy'))
def _check_whitelist(self, response, expected_whitelist):
"""Verify that the domain whitelist is rendered on the page. """
rendered_whitelist = json.dumps({
domain: '*'
for domain in expected_whitelist
})
self.assertContains(response, 'xdomain.min.js')
self.assertContains(response, rendered_whitelist)
| agpl-3.0 |
artdent/mingus-python3 | mingus/core/__init__.py | 5 | 1124 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
================================================================================
Mingus - Music theory Python module, core package
Copyright (C) 2008-2009, Bart Spaans
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================================================
"""
__all__ = [
'notes',
'diatonic',
'intervals',
'chords',
'scales',
'meter',
'progressions',
'mt_exceptions',
'value',
]
| gpl-3.0 |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/extern/ply/cpp.py | 192 | 33040 | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
t.lexer.lineno += t.value.count("\n")
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
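# A minimal illustration (hypothetical input, not from this module's tests):
# trigraph("x ??= 1 ??! y") returns "x # 1 | y", since '??=' maps to '#'
# and '??!' maps to '|' in the table above.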
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
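# As a rough illustration (the token objects are simplified here), a
# definition like "#define ADD(x, y) ((x)+(y))" would be stored as:
# Macro(name='ADD', value=<tokens for ((x)+(y))>, arglist=['x', 'y'], variadic=False)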
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
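# As an illustration (hypothetical token stream): for the argument text
# "(a, (b, c), d)", collect_args returns the number of tokens consumed, args
# as the three stripped token lists for 'a', '(b, c)' and 'd', plus their
# starting positions; the comma inside '(b, c)' is not a separator because
# it sits at nesting level 2.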
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the preceding comma.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except StandardError:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
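# For instance (assuming FOO is a defined macro and BAR is not), the token
# sequence for "defined(FOO) && !BAR" is rewritten to roughly the Python
# expression "1L and not 0L" (modulo whitespace) before eval() runs above.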
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,(str,unicode)):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
| mit |
edevil/django | django/conf/locale/ml/formats.py | 394 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
egnyte/python-egnyte | egnyte/events.py | 1 | 4981 | from __future__ import unicode_literals
import time
from egnyte import base, exc, resources
class Event(base.Resource):
"""
Event data.
Attributes:
* id - event id
* timestamp - date of event in iso8601 format
* action_source - event source: WebUI, SyncEngine, Mobile or PublicAPI
* actor - id of the user that generated the event
* type - event type. For now we support: file_system or note
* action - event action. For now we support: create, delete, move, copy, or rename
* object_detail - url to the public API providing detailed info about the object from the event, like https://domain.egnyte.com/pubapi/v1/fs/Shared
* data - additional data specific for event type and action
Possible fields for 'data' field:
for 'type'='file' and action create or delete
'target_id' - entry id of create/deleted file
'target_path' - path to created/deleted file
for 'type'='file' and action move/copy/rename
'source_path' - source path to moved/copied/renamed file
'target_path' - target path to moved/copied/renamed file
'source_id' - source entry id of the moved/copied/renamed file (for move/rename there is a single entry id, so either only one field is present or source_id and target_id carry the same value)
'target_id' - target entry id of moved/copied/renamed file
for 'type'='folder' and action create or delete
'target_path' - path to created/deleted folder
'folder_id' - folder id of created/deleted folder
for 'type'='folder' and action move/copy/rename
'source_path' - source path to moved/copied/renamed folder
'target_path' - target path to moved/copied/renamed folder
'source_id' - source folder id of moved/copied/renamed folder
'target_id' - target folder id of moved/copied/renamed folder
for 'type'='note' and any available action (create, delete)
'note_id' - id of added/deleted note
"""
_url_template = "pubapi/v1/events/%(id)s"
def user(self):
"""Get a user object based on event attributes"""
return resources.User(self._client, id=self.actor)
class Events(base.Resource):
"""
Events.
Attributes:
* latest_event_id - id of latest event
* oldest_event_id - id of oldest available event
* timestamp - iso8601 timestamp of latest event
"""
_url_template = "pubapi/v1/events/cursor"
_url_template_list = "pubapi/v1/events"
_lazy_attributes = {'latest_event_id', 'oldest_event_id', 'timestamp'}
poll_delay = 1.0
start_id = None
suppress = None
folder = None
types = None
def filter(self, start_id=None, suppress=None, folder=None, types=None):
"""
Returns a filtered view of the events,
Parameters:
* start_id - return all events that occurred after the id from the previous request (events shouldn't overlap between calls). Defaults to latest_event_id.
* folder (optional) - return events occurred only for this folders and all its content (subfolders, files and notes).
* suppress (optional) - filter out events generated by the requesting client (app), or only those generated by the requesting user through that client (user). Allowed values: app, user or none (defaults to no filter)
* types (optional) - return only events of given types.
"""
if types is not None:
types = '|'.join(types)
d = self.__dict__.copy()
d.update(base.filter_none_values(dict(start_id=start_id, suppress=suppress, folder=folder, type=types)))
return self.__class__(**d)
def list(self, start_id, count=None):
"""
Get detailed data about up to 'count' events after 'start_id'.
"""
if start_id is None:
start_id = self.start_id
params = base.filter_none_values(dict(id=start_id, suppress=self.suppress, type=self.types, count=count, folder=self.folder))
url = self._client.get_url(self._url_template_list)
json = exc.no_content_ok.check_json_response(self._client.GET(url, params=params))
if json is None:
return ()
else:
return base.ResultList((Event(self._client, **d) for d in json.get('events', ())), json['latest_id'], start_id)
def poll(self, count=None):
"""
List events starting after start_id (initially latest_event_id); if any are found, update start_id and return them.
"""
if self.start_id is None:
self.start_id = self.latest_event_id
results = self.list(self.start_id, count)
if results:
last = results[-1]
self.start_id = last.id
self.timestamp = last.timestamp
return results
def __iter__(self):
"""Never ending generator of events. Will block if necessary"""
while True:
results = self.poll()
for x in results:
yield x
if not results:
time.sleep(self.poll_delay)
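# A minimal polling sketch, assuming an EgnyteClient that exposes this
# resource as client.events (handle() is a hypothetical callback):
# for event in client.events.filter(folder='/Shared', types=['file_system']):
#     handle(event)  # blocks, sleeping poll_delay between empty polls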
| mit |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py | 536 | 2616 | from __future__ import absolute_import
import os
import time
from . import (LockBase, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
"""Lock access to a file using symlink(2)."""
def __init__(self, path, threaded=True, timeout=None):
# super(SymlinkLockFile).__init(...)
LockBase.__init__(self, path, threaded, timeout)
# split it back!
self.unique_name = os.path.split(self.unique_name)[1]
def acquire(self, timeout=None):
# Hopefully unnecessary for symlink.
# try:
# open(self.unique_name, "wb").close()
# except IOError:
# raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a symbolic link to it.
try:
os.symlink(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
if self.i_am_locking():
# Linked to our unique name. Proceed.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout / 10 if timeout is not None else 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.islink(self.lock_file)
def i_am_locking(self):
return (os.path.islink(self.lock_file)
and os.readlink(self.lock_file) == self.unique_name)
def break_lock(self):
if os.path.islink(self.lock_file): # exists && link
os.unlink(self.lock_file)
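# Usage sketch (path and timeout are hypothetical):
# lock = SymlinkLockFile("/tmp/shared.txt", timeout=5)
# lock.acquire()  # symlinks <path>.lock to the unique name
# try:
#     pass  # critical section
# finally:
#     lock.release()  # removes the symlink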
| apache-2.0 |
hsaito/linux | scripts/gdb/linux/cpus.py | 997 | 3560 | #
# gdb helper commands and functions for Linux kernel debugging
#
# per-cpu tools
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import tasks, utils
MAX_CPUS = 4096
def get_current_cpu():
if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
return gdb.selected_thread().num - 1
elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
tid = gdb.selected_thread().ptid[2]
if tid > (0x100000000 - MAX_CPUS - 2):
return 0x100000000 - tid - 2
else:
return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
else:
raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
"supported with this gdb server.")
def per_cpu(var_ptr, cpu):
if cpu == -1:
cpu = get_current_cpu()
if utils.is_target_arch("sparc:v9"):
offset = gdb.parse_and_eval(
"trap_block[{0}].__per_cpu_base".format(str(cpu)))
else:
try:
offset = gdb.parse_and_eval(
"__per_cpu_offset[{0}]".format(str(cpu)))
except gdb.error:
# !CONFIG_SMP case
offset = 0
pointer = var_ptr.cast(utils.get_long_type()) + offset
return pointer.cast(var_ptr.type).dereference()
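# For example, inside a gdb session (the variable name is illustrative):
# per_cpu(gdb.parse_and_eval("&runqueues"), 0) returns the CPU-0 instance
# of the per-cpu 'runqueues' variable.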
cpu_mask = {}
def cpu_mask_invalidate(event):
global cpu_mask
cpu_mask = {}
gdb.events.stop.disconnect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.disconnect(cpu_mask_invalidate)
def cpu_list(mask_name):
global cpu_mask
mask = None
if mask_name in cpu_mask:
mask = cpu_mask[mask_name]
if mask is None:
mask = gdb.parse_and_eval(mask_name + ".bits")
if hasattr(gdb, 'events'):
cpu_mask[mask_name] = mask
gdb.events.stop.connect(cpu_mask_invalidate)
if hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(cpu_mask_invalidate)
bits_per_entry = mask[0].type.sizeof * 8
num_entries = mask.type.sizeof * 8 / bits_per_entry
entry = -1
bits = 0
while True:
while bits == 0:
entry += 1
if entry == num_entries:
return
bits = mask[entry]
if bits != 0:
bit = 0
break
while bits & 1 == 0:
bits >>= 1
bit += 1
cpu = entry * bits_per_entry + bit
bits >>= 1
bit += 1
yield cpu
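# Illustration (values hypothetical; the mask symbol name depends on the
# kernel version): list(cpu_list("__cpu_online_mask")) on a 4-CPU guest
# would yield [0, 1, 2, 3].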
class PerCpu(gdb.Function):
"""Return per-cpu variable.
$lx_per_cpu("VAR"[, CPU]): Return the per-cpu variable called VAR for the
given CPU number. If CPU is omitted, the CPU of the current context is used.
Note that VAR has to be quoted as string."""
def __init__(self):
super(PerCpu, self).__init__("lx_per_cpu")
def invoke(self, var_name, cpu=-1):
var_ptr = gdb.parse_and_eval("&" + var_name.string())
return per_cpu(var_ptr, cpu)
PerCpu()
class LxCurrentFunc(gdb.Function):
"""Return current task.
$lx_current([CPU]): Return the per-cpu task variable for the given CPU
number. If CPU is omitted, the CPU of the current context is used."""
def __init__(self):
super(LxCurrentFunc, self).__init__("lx_current")
def invoke(self, cpu=-1):
var_ptr = gdb.parse_and_eval("&current_task")
return per_cpu(var_ptr, cpu).dereference()
LxCurrentFunc()
| gpl-2.0 |
slank/ansible | lib/ansible/modules/network/f5/bigip_ssl_certificate.py | 32 | 16292 | #!/usr/bin/python
#
# (c) 2016, Kevin Coming (@waffie1)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: bigip_ssl_certificate
short_description: Import/Delete certificates from BIG-IP
description:
- This module will import/delete SSL certificates on BIG-IP LTM.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: 2.2
options:
cert_content:
description:
- When used instead of 'cert_src', sets the contents of a certificate directly
to the specified value. This is used with lookup plugins or for anything
with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_content:
description:
- When used instead of 'key_src', sets the contents of a certificate key
directly to the specified value. This is used with lookup plugins or for
anything with formatting or templating. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
state:
description:
- Certificate and key state. This determines if the provided certificate
and key is to be made C(present) on the device or C(absent).
required: true
default: present
choices:
- present
- absent
partition:
description:
- BIG-IP partition to use when adding/deleting certificate.
required: false
default: Common
name:
description:
- SSL Certificate Name. This is the cert/key pair name used
when importing a certificate/key into the F5. It also
determines the filenames of the objects on the LTM
(:Partition:name.cer_11111_1 and :Partition_name.key_11111_1).
required: true
cert_src:
description:
- This is the local filename of the certificate. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
key_src:
description:
- This is the local filename of the private key. Either one of C(key_src),
C(key_content), C(cert_src) or C(cert_content) must be provided when
C(state) is C(present).
required: false
passphrase:
description:
- Passphrase on certificate private key
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires the netaddr Python package on the host.
- If you use this module, you will not be able to remove the certificates
and keys that are managed, via the web UI. You can only remove them via
tmsh or these modules.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 1.5.0
- BigIP >= v12
author:
- Kevin Coming (@waffie1)
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Import PEM Certificate from local disk
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_src: "/path/to/cert.crt"
key_src: "/path/to/key.key"
delegate_to: localhost
- name: Use a file lookup to import PEM Certificate
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
cert_content: "{{ lookup('file', '/path/to/cert.crt') }}"
key_content: "{{ lookup('file', '/path/to/key.key') }}"
delegate_to: localhost
- name: "Delete Certificate"
bigip_ssl_certificate:
name: "certificate-name"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
delegate_to: localhost
'''
RETURN = '''
cert_name:
description: >
The name of the SSL certificate. The C(cert_name) and
C(key_name) will be equal to each other.
returned:
- created
- changed
- deleted
type: string
sample: "cert1"
key_name:
description: >
The name of the SSL certificate key. The C(key_name) and
C(cert_name) will be equal to each other.
returned:
- created
- changed
- deleted
type: string
sample: "key1"
partition:
description: Partition in which the cert/key was created
returned:
- changed
- created
- deleted
type: string
sample: "Common"
key_checksum:
description: SHA1 checksum of the key that was provided
returned:
- changed
- created
type: string
sample: "cf23df2207d99a74fbe169e3eba035e633b65d94"
cert_checksum:
description: SHA1 checksum of the cert that was provided
returned:
- changed
- created
type: string
sample: "f7ff9e8b7bb2e09b70935a5d785e0cc5d9d0abf0"
'''
try:
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import hashlib
import os
import StringIO
class BigIpSslCertificate(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
required_args = ['key_content', 'key_src', 'cert_content', 'cert_src']
ksource = kwargs['key_src']
if ksource:
with open(ksource) as f:
kwargs['key_content'] = f.read()
csource = kwargs['cert_src']
if csource:
with open(csource) as f:
kwargs['cert_content'] = f.read()
if kwargs['state'] == 'present':
if not any(kwargs[k] is not None for k in required_args):
raise F5ModuleError(
"Either 'key_content', 'key_src', 'cert_content' or "
"'cert_src' must be provided"
)
# This is the remote BIG-IP path from where it will look for certs
# to install.
self.dlpath = '/var/config/rest/downloads'
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def exists(self):
cert = self.cert_exists()
key = self.key_exists()
if cert and key:
return True
else:
return False
def get_hash(self, content):
k = hashlib.sha1()
s = StringIO.StringIO(content)
while True:
data = s.read(1024)
if not data:
break
k.update(data)
return k.hexdigest()
def present(self):
current = self.read()
changed = False
do_key = False
do_cert = False
chash = None
khash = None
check_mode = self.params['check_mode']
name = self.params['name']
partition = self.params['partition']
cert_content = self.params['cert_content']
key_content = self.params['key_content']
passphrase = self.params['passphrase']
# Technically you don't need to provide us with anything in the form
# of content for your cert, but that's kind of illogical, so we just
# return saying you didn't "do" anything if you left the cert and keys
# empty.
if not cert_content and not key_content:
return False
if key_content is not None:
if 'key_checksum' in current:
khash = self.get_hash(key_content)
if khash not in current['key_checksum']:
do_key = "update"
else:
do_key = "create"
if cert_content is not None:
if 'cert_checksum' in current:
chash = self.get_hash(cert_content)
if chash not in current['cert_checksum']:
do_cert = "update"
else:
do_cert = "create"
if do_cert or do_key:
changed = True
params = dict()
params['cert_name'] = name
params['key_name'] = name
params['partition'] = partition
if khash:
params['key_checksum'] = khash
if chash:
params['cert_checksum'] = chash
self.cparams = params
if check_mode:
return changed
if not do_cert and not do_key:
return False
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
if do_cert:
# Upload the content of a certificate as a StringIO object
cstring = StringIO.StringIO(cert_content)
filename = "%s.crt" % (name)
filepath = os.path.join(self.dlpath, filename)
api.shared.file_transfer.uploads.upload_stringio(
cstring,
filename
)
if do_cert == "update":
# Install the certificate
params = {
'name': name,
'partition': partition
}
cert = api.tm.sys.file.ssl_certs.ssl_cert.load(**params)
# This works because, while the source path is the same,
# calling update causes the file to be re-read
cert.update()
changed = True
elif do_cert == "create":
# Install the certificate
params = {
'sourcePath': "file://" + filepath,
'name': name,
'partition': partition
}
api.tm.sys.file.ssl_certs.ssl_cert.create(**params)
changed = True
if do_key:
# Upload the content of a certificate key as a StringIO object
kstring = StringIO.StringIO(key_content)
filename = "%s.key" % (name)
filepath = os.path.join(self.dlpath, filename)
api.shared.file_transfer.uploads.upload_stringio(
kstring,
filename
)
if do_key == "update":
# Install the key
params = {
'name': name,
'partition': partition
}
key = api.tm.sys.file.ssl_keys.ssl_key.load(**params)
params = dict()
if passphrase:
params['passphrase'] = passphrase
else:
params['passphrase'] = None
key.update(**params)
changed = True
elif do_key == "create":
# Install the key
params = {
'sourcePath': "file://" + filepath,
'name': name,
'partition': partition
}
if passphrase:
params['passphrase'] = self.params['passphrase']
else:
params['passphrase'] = None
api.tm.sys.file.ssl_keys.ssl_key.create(**params)
changed = True
return changed
def key_exists(self):
return self.api.tm.sys.file.ssl_keys.ssl_key.exists(
name=self.params['name'],
partition=self.params['partition']
)
def cert_exists(self):
return self.api.tm.sys.file.ssl_certs.ssl_cert.exists(
name=self.params['name'],
partition=self.params['partition']
)
def read(self):
p = dict()
name = self.params['name']
partition = self.params['partition']
if self.key_exists():
key = self.api.tm.sys.file.ssl_keys.ssl_key.load(
name=name,
partition=partition
)
if hasattr(key, 'checksum'):
p['key_checksum'] = str(key.checksum)
if self.cert_exists():
cert = self.api.tm.sys.file.ssl_certs.ssl_cert.load(
name=name,
partition=partition
)
if hasattr(cert, 'checksum'):
p['cert_checksum'] = str(cert.checksum)
p['name'] = name
return p
def flush(self):
result = dict()
state = self.params['state']
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def delete(self):
changed = False
check_mode = self.params['check_mode']
delete_cert = self.cert_exists()
delete_key = self.key_exists()
if not delete_cert and not delete_key:
return changed
if check_mode:
params = dict()
# 'name' and 'partition' are not defined locally; read them from self.params
params['cert_name'] = self.params['name']
params['key_name'] = self.params['name']
params['partition'] = self.params['partition']
self.cparams = params
return True
tx = self.api.tm.transactions.transaction
with TransactionContextManager(tx) as api:
if delete_cert:
# Delete the certificate
c = api.tm.sys.file.ssl_certs.ssl_cert.load(
name=self.params['name'],
partition=self.params['partition']
)
c.delete()
changed = True
if delete_key:
# Delete the certificate key
k = self.api.tm.sys.file.ssl_keys.ssl_key.load(
name=self.params['name'],
partition=self.params['partition']
)
k.delete()
changed = True
return changed
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
name=dict(type='str', required=True),
cert_content=dict(type='str', default=None),
cert_src=dict(type='path', default=None),
key_content=dict(type='str', default=None),
key_src=dict(type='path', default=None),
passphrase=dict(type='str', default=None, no_log=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['key_content', 'key_src'],
['cert_content', 'cert_src']
]
)
try:
obj = BigIpSslCertificate(check_mode=module.check_mode,
**module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
tensorport/mnist | mnist.py | 1 | 11743 | # MIT License, see LICENSE
# Copyright (c) 2018 Clusterone Inc.
# Author: Adrian Yi, adrian@clusterone.com
from __future__ import print_function
import json
import os
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import tensorflow as tf
from clusterone import get_data_path, get_logs_path
from utils import train_dataset, test_dataset
def parse_args():
"""Parse arguments"""
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
description='''Train a convolution neural network with MNIST dataset.
For distributed mode, the script will use few environment variables as defaults:
JOB_NAME, TASK_INDEX, PS_HOSTS, and WORKER_HOSTS. These environment variables will be
available on distributed Tensorflow jobs on Clusterone platform by default.
If running this locally, you will need to set these environment variables
or pass them in as arguments (e.g. python mnist.py --job_name worker --task_index 0
--worker_hosts "localhost:2222,localhost:2223" --ps_hosts "localhost:2224").
If these are not set, the script will run in non-distributed (single instance) mode.''')
# Configuration for distributed task
parser.add_argument('--job_name', type=str, default=os.environ.get('JOB_NAME', None), choices=['worker', 'ps'],
help='Task type for the node in the distributed cluster. Worker-0 will be set as master.')
parser.add_argument('--task_index', type=int, default=os.environ.get('TASK_INDEX', 0),
help='Worker task index, should be >= 0. task_index=0 is the chief worker.')
parser.add_argument('--ps_hosts', type=str, default=os.environ.get('PS_HOSTS', ''),
help='Comma-separated list of hostname:port pairs.')
parser.add_argument('--worker_hosts', type=str, default=os.environ.get('WORKER_HOSTS', ''),
help='Comma-separated list of hostname:port pairs.')
# Experiment related parameters
parser.add_argument('--local_data_root', type=str, default=os.path.abspath('./data/'),
help='Path to dataset. This path will be /data on Clusterone.')
parser.add_argument('--local_log_root', type=str, default=os.path.abspath('./logs/'),
help='Path to store logs and checkpoints. This path will be /logs on Clusterone.')
parser.add_argument('--data_subpath', type=str, default='',
help='Which sub-directory the data will sit inside local_data_root (locally) ' +
'or /data/ (on Clusterone).')
# CNN model params
parser.add_argument('--kernel_size', type=int, default=3,
help='Size of the CNN kernels to use.')
parser.add_argument('--hidden_units', type=str, default='32,64',
help='Comma-separated list of integers. Number of hidden units to use in CNN model.')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='Initial learning rate used in Adam optimizer.')
parser.add_argument('--learning_decay', type=float, default=0.0001,
help='Exponential decay rate of the learning rate per step.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate used after each convolutional layer.')
parser.add_argument('--batch_size', type=int, default=512,
help='Batch size to use during training and evaluation.')
# Training params
parser.add_argument('--verbosity', type=str, default='INFO', choices=['CRITICAL', 'ERROR', 'WARN', 'INFO', 'DEBUG'],
help='TF logging level. To see intermediate results printed, set this to INFO or DEBUG.')
parser.add_argument('--fashion', action='store_true',
help='Download and use fashion MNIST data instead of the default handwritten digit MNIST.')
parser.add_argument('--parallel_batches', type=int, default=2,
help='Number of parallel batches to prepare in data pipeline.')
parser.add_argument('--max_ckpts', type=int, default=2,
help='Maximum number of checkpoints to keep.')
parser.add_argument('--ckpt_steps', type=int, default=100,
help='How frequently to save a model checkpoint.')
parser.add_argument('--save_summary_steps', type=int, default=10,
help='How frequently to save TensorBoard summaries.')
parser.add_argument('--log_step_count_steps', type=int, default=10,
help='How frequently to log loss & global steps/s.')
parser.add_argument('--eval_secs', type=int, default=60,
help='How frequently to run evaluation step.')
# Parse args
opts = parser.parse_args()
opts.data_dir = get_data_path(dataset_name='*/*',
local_root=opts.local_data_root,
local_repo='',
path=opts.data_subpath)
opts.log_dir = get_logs_path(root=opts.local_log_root)
opts.hidden_units = [int(n) for n in opts.hidden_units.split(',')]
if opts.worker_hosts:
opts.worker_hosts = opts.worker_hosts.split(',')
else:
opts.worker_hosts = []
if opts.ps_hosts:
opts.ps_hosts = opts.ps_hosts.split(',')
else:
opts.ps_hosts = []
return opts
def make_tf_config(opts):
"""Returns TF_CONFIG that can be used to set the environment variable necessary for distributed training"""
if all([opts.job_name is None, not opts.ps_hosts, not opts.worker_hosts]):
return {}
elif any([opts.job_name is None, not opts.ps_hosts, not opts.worker_hosts]):
tf.logging.warn('Distributed setting is incomplete. You must pass job_name, ps_hosts, and worker_hosts.')
if opts.job_name is None:
tf.logging.warn('Expected job_name of worker or ps. Received {}.'.format(opts.job_name))
if not opts.ps_hosts:
tf.logging.warn('Expected ps_hosts, list of hostname:port pairs. Got {}. '.format(opts.ps_hosts) +
'Example: --ps_hosts "localhost:2224" or --ps_hosts "localhost:2224,localhost:2225"')
if not opts.worker_hosts:
tf.logging.warn('Expected worker_hosts, list of hostname:port pairs. Got {}. '.format(opts.worker_hosts) +
'Example: --worker_hosts "localhost:2222,localhost:2223"')
tf.logging.warn('Ignoring distributed arguments. Running single mode.')
return {}
tf_config = {
'task': {
'type': opts.job_name,
'index': opts.task_index
},
'cluster': {
'master': [opts.worker_hosts[0]],
'worker': opts.worker_hosts,
'ps': opts.ps_hosts
},
'environment': 'cloud'
}
# Each node may need to refer to itself as localhost
local_ip = 'localhost:' + tf_config['cluster'][opts.job_name][opts.task_index].split(':')[1]
tf_config['cluster'][opts.job_name][opts.task_index] = local_ip
if opts.job_name == 'worker' and opts.task_index == 0:
tf_config['task']['type'] = 'master'
tf_config['cluster']['master'][0] = local_ip
return tf_config
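# Worked example: running with --job_name worker --task_index 0
# --worker_hosts "localhost:2222,localhost:2223" --ps_hosts "localhost:2224"
# produces (after the localhost and master rewrites above):
#   {'task': {'type': 'master', 'index': 0},
#    'cluster': {'master': ['localhost:2222'],
#                'worker': ['localhost:2222', 'localhost:2223'],
#                'ps': ['localhost:2224']},
#    'environment': 'cloud'}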
def get_input_fn(opts, is_train=True):
"""Returns input_fn. is_train=True shuffles and repeats data indefinitely"""
def input_fn():
with tf.device('/cpu:0'):
if is_train:
dataset = train_dataset(opts.data_dir, fashion=opts.fashion)
dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=5 * opts.batch_size, count=None))
else:
dataset = test_dataset(opts.data_dir, fashion=opts.fashion)
dataset = dataset.batch(batch_size=opts.batch_size)
iterator = dataset.make_one_shot_iterator()
return iterator.get_next()
return input_fn
def cnn_net(input_tensor, opts):
"""Return logits output from CNN net"""
temp = tf.reshape(input_tensor, shape=(-1, 28, 28, 1), name='input_image')
for i, n_units in enumerate(opts.hidden_units):
temp = tf.layers.conv2d(temp, filters=n_units, kernel_size=opts.kernel_size, strides=(2, 2),
activation=tf.nn.relu, name='cnn' + str(i))
temp = tf.layers.dropout(temp, rate=opts.dropout)
temp = tf.reduce_mean(temp, axis=(1, 2), keepdims=False, name='average') # global average pooling over the spatial H, W axes of the NHWC tensor
return tf.layers.dense(temp, 10)
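# Shape walkthrough for the defaults (hidden_units='32,64', kernel_size=3,
# stride 2, and tf.layers.conv2d's default 'valid' padding):
#   (N, 28, 28, 1) -> cnn0 (N, 13, 13, 32) -> cnn1 (N, 6, 6, 64)
#   -> global average (N, 64) -> dense logits (N, 10)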
def get_model_fn(opts):
"""Return model fn to be used for Estimator class"""
def model_fn(features, labels, mode):
"""Returns EstimatorSpec for different mode (train/eval/predict)"""
logits = cnn_net(features, opts)
pred = tf.cast(tf.argmax(logits, axis=1), tf.int64)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions={'logits': logits, 'pred': pred})
cent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy')
loss = tf.reduce_mean(cent, name='loss')
metrics = {'accuracy': tf.metrics.accuracy(labels=labels, predictions=pred, name='accuracy')}
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
optimizer = tf.train.AdamOptimizer(learning_rate=opts.learning_rate)
train_op = optimizer.minimize(loss, global_step=tf.train.get_or_create_global_step())
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
return model_fn
def main(opts):
"""Main"""
# Create an estimator
config = tf.estimator.RunConfig(
model_dir=opts.log_dir,
save_summary_steps=opts.save_summary_steps,
save_checkpoints_steps=opts.ckpt_steps,
keep_checkpoint_max=opts.max_ckpts,
log_step_count_steps=opts.log_step_count_steps)
estimator = tf.estimator.Estimator(
model_fn=get_model_fn(opts),
config=config)
# Create input fns
# Training reads the train split; evaluation reads the test split (is_train=False).
train_input_fn = get_input_fn(opts, is_train=True)
eval_input_fn = get_input_fn(opts, is_train=False)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
max_steps=1e6)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
steps=None,
start_delay_secs=0,
throttle_secs=opts.eval_secs)
# Train and evaluate!
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
if __name__ == "__main__":
args = parse_args()
tf.logging.set_verbosity(args.verbosity)
tf.logging.debug('=' * 20 + ' Environment Variables ' + '=' * 20)
for k, v in os.environ.items():
tf.logging.debug('{}: {}'.format(k, v))
tf.logging.debug('=' * 20 + ' Arguments ' + '=' * 20)
for k, v in sorted(args.__dict__.items()):
if v is not None:
tf.logging.debug('{}: {}'.format(k, v))
TF_CONFIG = make_tf_config(args)
tf.logging.debug('=' * 20 + ' TF_CONFIG ' + '=' * 20)
tf.logging.debug(TF_CONFIG)
os.environ['TF_CONFIG'] = json.dumps(TF_CONFIG)
tf.logging.info('=' * 20 + ' Train starting ' + '=' * 20)
main(args)
| mit |
canerbasaran/onionshare | test/onionshare_helpers_test.py | 2 | 1034 | """
OnionShare | https://onionshare.org/
Copyright (C) 2015 Micah Lee <micah@micahflee.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from onionshare import helpers
from nose import with_setup
import test_helpers
def test_get_platform_returns_platform_system():
"""get_platform() returns platform.system() when ONIONSHARE_PLATFORM is not defined"""
helpers.platform.system = lambda: 'Sega Saturn'
assert helpers.get_platform() == 'Sega Saturn'
| gpl-3.0 |
globaltoken/globaltoken | test/functional/rpc_rawtransaction.py | 1 | 18199 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
>>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes,0,2)
def run_test(self):
#prepare some coins for multiple *rawtransaction commands
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
# Test getrawtransaction on genesis block coinbase returns an error
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-3, "Expected type object", self.nodes[0].createrawtransaction, [], 'foo')
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
#########################################
# sendrawtransaction with missing input #
#########################################
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.info(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.info(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
if __name__ == '__main__':
RawTransactionsTest().main()
| mit |
ddepaoli3/fuel-main-dev | fuelweb_ui_test/pageobjects/node_interfaces_settings.py | 4 | 1360 | from pageobjects.base import PageObject
from pageobjects.settings import SettingsFooter
from selenium.webdriver.support.select import Select
class InterfacesSettings(PageObject, SettingsFooter):
@property
def interfaces(self):
elements = self.parent.\
find_elements_by_css_selector('.physical-network-box')
return [Interface(e) for e in elements]
class Interface(PageObject):
def __init__(self, element):
PageObject.__init__(self, element)
@property
def info(self):
return self.parent.find_element_by_css_selector('.network-info-box')
@property
def networks_box(self):
return self.parent.find_element_by_css_selector('.logical-network-box')
@property
def networks(self):
elements = self.parent.\
find_elements_by_css_selector('.logical-network-item')
return {el.find_element_by_css_selector('.name').text.lower(): el
for el in elements}
@property
def interface_checkbox(self):
return self.parent.find_element_by_css_selector('.custom-tumbler')
@property
def bond_mode(self):
return self.parent.find_element_by_css_selector('.network-bond-mode')
@property
def select_mode(self):
return Select(
self.parent.find_element_by_css_selector('select[name=mode]'))
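# A hedged usage sketch (the PageObject base class is assumed to wrap a
# webdriver instance; the bond mode string is illustrative):
#
#   page = InterfacesSettings(driver)
#   first = page.interfaces[0]
#   first.interface_checkbox.click()
#   first.select_mode.select_by_visible_text('balance-rr')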
| apache-2.0 |
L337hium/dhquery | dhquery.py | 1 | 7156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
# vim:tabstop=2
from random import Random
from optparse import OptionParser
from pydhcplib.dhcp_packet import DhcpPacket
from pydhcplib.dhcp_network import DhcpClient
from pydhcplib.type_hw_addr import hwmac
from pydhcplib.type_ipv4 import ipv4
import socket
import sys
r = Random()
r.seed()
dhcpTypes = {
1: 'DISCOVER',
2: 'OFFER',
3: 'REQUEST',
4: 'DECLINE',
5: 'ACK',
6: 'NACK',
7: 'RELEASE',
8: 'INFORM',
}
nagiosRC = {
0: 'OK',
1: 'Warning',
2: 'Critical',
3: 'Unknown',
}
def nagiosExit(rc,message):
print "%s: %s"%(nagiosRC.get(rc,'???'),message)
sys.exit(rc)
class SilentClient(DhcpClient):
def HandleDhcpAck(self,p):
return
def HandleDhcpNack(self,p):
return
def HandleDhcpOffer(self,p):
return
def HandleDhcpUnknown(self,p):
return
def HandleDhcpDiscover(self,p):
return
def genxid():
decxid = r.randint(0,0xffffffff)
xid = []
for i in xrange(4):
xid.insert(0, decxid & 0xff)
decxid = decxid >> 8
return xid
def genmac():
i = []
for z in xrange(6):
i.append(r.randint(0,255))
return ':'.join(map(lambda x: "%02x"%x,i))
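# For example (random, illustrative values):
#   genxid() -> [26, 143, 7, 201]    # 32-bit transaction id as 4 octets, MSB first
#   genmac() -> '5e:23:b1:07:9a:44'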
def receivePacket(serverip, serverport, timeout, req):
"""Sends and receives packet from DHCP server"""
client = SilentClient(client_listen_port=67, server_listen_port=serverport)
client.dhcp_socket.settimeout(timeout)
if serverip == '0.0.0.0': req.SetOption('flags',[128, 0])
req_type = req.GetOption('dhcp_message_type')[0]
client.SendDhcpPacketTo(serverip,req)
# Don't wait for an answer to a RELEASE message
if req_type == 7: return None
res = client.GetNextDhcpPacket()
# Try the next packet if this one has the same message type as the request we sent (i.e. we heard our own broadcast).
if res.GetOption('dhcp_message_type')[0] == req_type: res = client.GetNextDhcpPacket()
return res
def preparePacket(xid=None,giaddr='0.0.0.0',chaddr='00:00:00:00:00:00',ciaddr='0.0.0.0',msgtype='discover',required_opts=[]):
req = DhcpPacket()
req.SetOption('op',[1])
req.SetOption('htype',[1])
req.SetOption('hlen',[6])
req.SetOption('hops',[0])
if not xid: xid = genxid()
req.SetOption('xid',xid)
req.SetOption('giaddr',ipv4(giaddr).list())
req.SetOption('chaddr',hwmac(chaddr).list() + [0] * 10)
req.SetOption('ciaddr',ipv4(ciaddr).list())
if msgtype == 'request':
mt = 3
elif msgtype == 'release':
mt = 7
else:
mt = 1
req.SetOption('dhcp_message_type',[mt])
# req.SetOption('parameter_request_list',1)
return req
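# A minimal usage sketch (values illustrative): broadcast a DISCOVER for a
# random client MAC and wait up to 4 seconds for the server's reply.
#   req = preparePacket(chaddr=genmac(), msgtype='discover')
#   res = receivePacket(serverip='0.0.0.0', serverport=67, timeout=4, req=req)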
def main():
parser = OptionParser()
parser.add_option("-s","--server", dest="server", default='0.0.0.0', help="DHCP server IP (default %default)")
parser.add_option("-p","--port", type="int", dest="port", default=67, help="DHCP server port (default (%default)")
parser.add_option("-m","--mac","--chaddr", dest="chaddr", help="chaddr: Client's MAC address, default random")
parser.add_option("-c","--ciaddr", dest="ciaddr", default='0.0.0.0', help="ciaddr: Client's desired IP address")
parser.add_option("-g","--giaddr", dest="giaddr", default='0.0.0.0', help="giaddr: Gateway IP address (if any)")
parser.add_option("-t","--type", dest="msgtype", type="choice", choices=["discover","request","release"],
default="discover", help="DHCP message type: discover, request, release (default %default)")
parser.add_option("-w","--timeout", dest="timeout", type="int", default=4, help="UDP timeout (default %default)")
parser.add_option("-r","--require", action="append", type="int", default=[1,3,6,51], dest="required_opts", help="Require options by its number")
parser.add_option("-y","--cycle", action="store_true", dest="docycle", help="Do full cycle: DISCOVERY, REQUEST, RELEASE")
parser.add_option("-n","--cycles", dest="cycles", type="int", default="1", help="Do number of cycles (default %default)")
parser.add_option("-v","--verbose", action="store_true", dest="verbose", help="Verbose operation")
parser.add_option("-q","--quiet", action="store_false", dest="verbose", help="Quiet operation")
parser.add_option("--nagios", action="store_true", dest="nagios", help="Nagios mode of operation")
(opts, args) = parser.parse_args()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if opts.nagios: opts.verbose = False
verbose = opts.verbose
if opts.docycle:
request_dhcp_message_type = "discover"
else:
request_dhcp_message_type = opts.msgtype
request_ciaddr = opts.ciaddr
serverip = opts.server
cycleno = 1
xid = genxid()
while True:
if opts.cycles > 1 and opts.verbose is not False and (not opts.docycle or request_dhcp_message_type == "discover"):
print "="*100
print "| Cycle %s"%cycleno
print "="*100
req = preparePacket(xid=xid, giaddr=opts.giaddr, chaddr=chaddr, ciaddr=request_ciaddr, msgtype=request_dhcp_message_type, required_opts=opts.required_opts)
if verbose != False:
print "Sending %s [%s] packet to %s"%(request_dhcp_message_type.upper(),chaddr, opts.server)
if verbose == True:
print "-"*100
req.PrintHeaders()
req.PrintOptions()
print "="*100
print "\n"
try:
res = receivePacket(serverip=serverip, serverport=opts.port, timeout=opts.timeout, req=req)
except socket.timeout:
res = None
if opts.nagios: nagiosExit(2,"%s request timed out."%request_dhcp_message_type.upper())
if verbose != False: print "Timed out."
pass
if res:
dhcp_message_type = res.GetOption('dhcp_message_type')[0]
server_identifier = ipv4(res.GetOption('server_identifier'))
chaddr = hwmac(res.GetOption('chaddr')[:6])
yiaddr = ipv4(res.GetOption('yiaddr'))
if opts.nagios and dhcp_message_type not in (2, 5):
nagiosExit(2,"Got %s response for our %s request"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'),dhcpTypes.get(request_dhcp_message_type,'UNKNOWN')))
if verbose != False:
print "Received %s packet from %s; [%s] was bound to %s"%(dhcpTypes.get(dhcp_message_type,'UNKNOWN'), server_identifier, chaddr, yiaddr )
if verbose == True:
print "-"*100
res.PrintHeaders()
res.PrintOptions()
print "="*100
print "\n"
if opts.docycle:
if dhcp_message_type == 2:
request_dhcp_message_type = 'request'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
if dhcp_message_type == 5:
request_dhcp_message_type = 'release'
request_ciaddr = yiaddr.str()
serverip = server_identifier.str()
continue
cycleno += 1
if cycleno > opts.cycles:
if opts.nagios:
if res:
nagiosExit(0,"%s finished successfully: %s. yiaddr: %s, chaddr: %s"%(
request_dhcp_message_type.upper(),
dhcpTypes.get(dhcp_message_type,'UNKNOWN'),
yiaddr,
chaddr,
))
elif opts.docycle:
nagiosExit(0,"Cycle finished successfully. Got %s for %s"%(yiaddr,chaddr))
else:
nagiosExit(0,"%s finished without an answer"%(request_dhcp_message_type.upper()))
break
if opts.docycle:
request_dhcp_message_type = "discover"
request_ciaddr = opts.ciaddr
serverip = opts.server
xid = genxid()
if not opts.chaddr:
chaddr = genmac()
else:
chaddr = opts.chaddr
if __name__ == '__main__':
main()
| gpl-2.0 |
AdrianHuang/rt-thread | bsp/stm32f40x/rtconfig.py | 9 | 3663 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler's executable path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'E:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
STM32_TYPE = 'STM32F4XX'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -g -Wall -DSTM32F407ZG -DSTM32F4XX -DUSE_STDPERIPH_DRIVER -D__ASSEMBLY__ -D__FPU_USED'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cortex-m4.fp'
CFLAGS = DEVICE + ' --apcs=interwork -DUSE_STDPERIPH_DRIVER -DSTM32F40_41xxx'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"' # assumes EXEC_PATH points at the IAR install root
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu None'
LFLAGS = ' --config stm32f10x_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/' # assumes EXEC_PATH was set to the IAR install root (e.g. via RTT_EXEC_PATH)
POST_ACTION = ''
| gpl-2.0 |
HackFisher/depot_tools | tests/gclient_utils_test.py | 44 | 7351 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import StringIO
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from testing_support.super_mox import SuperMoxTestBase
from testing_support import trial_dir
import gclient_utils
import subprocess2
class GclientUtilBase(SuperMoxTestBase):
def setUp(self):
super(GclientUtilBase, self).setUp()
gclient_utils.sys.stdout.flush = lambda: None
self.mox.StubOutWithMock(subprocess2, 'Popen')
self.mox.StubOutWithMock(subprocess2, 'communicate')
class CheckCallAndFilterTestCase(GclientUtilBase):
class ProcessIdMock(object):
def __init__(self, test_string):
self.stdout = StringIO.StringIO(test_string)
self.pid = 9284
# pylint: disable=R0201
def wait(self):
return 0
def _inner(self, args, test_string):
cwd = 'bleh'
gclient_utils.sys.stdout.write(
'\n________ running \'boo foo bar\' in \'bleh\'\n')
for i in test_string:
gclient_utils.sys.stdout.write(i)
# pylint: disable=E1101
subprocess2.Popen(
args,
cwd=cwd,
stdout=subprocess2.PIPE,
stderr=subprocess2.STDOUT,
bufsize=0).AndReturn(self.ProcessIdMock(test_string))
os.getcwd()
self.mox.ReplayAll()
compiled_pattern = gclient_utils.re.compile(r'a(.*)b')
line_list = []
capture_list = []
def FilterLines(line):
line_list.append(line)
assert isinstance(line, str), type(line)
match = compiled_pattern.search(line)
if match:
capture_list.append(match.group(1))
gclient_utils.CheckCallAndFilterAndHeader(
args, cwd=cwd, always=True, filter_fn=FilterLines)
self.assertEquals(line_list, ['ahah', 'accb', 'allo', 'addb'])
self.assertEquals(capture_list, ['cc', 'dd'])
def testCheckCallAndFilter(self):
args = ['boo', 'foo', 'bar']
test_string = 'ahah\naccb\nallo\naddb\n'
self._inner(args, test_string)
self.checkstdout('\n________ running \'boo foo bar\' in \'bleh\'\n'
'ahah\naccb\nallo\naddb\n\n'
'________ running \'boo foo bar\' in \'bleh\'\nahah\naccb\nallo\naddb'
'\n')
def testNoLF(self):
# Exactly as testCheckCallAndFilterAndHeader without trailing \n
args = ['boo', 'foo', 'bar']
test_string = 'ahah\naccb\nallo\naddb'
self._inner(args, test_string)
self.checkstdout('\n________ running \'boo foo bar\' in \'bleh\'\n'
'ahah\naccb\nallo\naddb\n'
'________ running \'boo foo bar\' in \'bleh\'\nahah\naccb\nallo\naddb')
class SplitUrlRevisionTestCase(GclientUtilBase):
def testSSHUrl(self):
url = "ssh://test@example.com/test.git"
rev = "ac345e52dc"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
url = "ssh://example.com/test.git"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
url = "ssh://example.com/git/test.git"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
rev = "test-stable"
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
url = "ssh://user-name@example.com/~/test.git"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
url = "ssh://user-name@example.com/~username/test.git"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
url = "git@github.com:dart-lang/spark.git"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
def testSVNUrl(self):
url = "svn://example.com/test"
rev = "ac345e52dc"
out_url, out_rev = gclient_utils.SplitUrlRevision(url)
self.assertEquals(out_rev, None)
self.assertEquals(out_url, url)
out_url, out_rev = gclient_utils.SplitUrlRevision("%s@%s" % (url, rev))
self.assertEquals(out_rev, rev)
self.assertEquals(out_url, url)
class GClientUtilsTest(trial_dir.TestCase):
def testHardToDelete(self):
# Use the fact that tearDown will delete the directory to make it hard to do
# so.
l1 = os.path.join(self.root_dir, 'l1')
l2 = os.path.join(l1, 'l2')
l3 = os.path.join(l2, 'l3')
f3 = os.path.join(l3, 'f3')
os.mkdir(l1)
os.mkdir(l2)
os.mkdir(l3)
gclient_utils.FileWrite(f3, 'foo')
os.chmod(f3, 0)
os.chmod(l3, 0)
os.chmod(l2, 0)
os.chmod(l1, 0)
def testUpgradeToHttps(self):
values = [
['', ''],
[None, None],
['foo', 'https://foo'],
['http://foo', 'https://foo'],
['foo/', 'https://foo/'],
['ssh-svn://foo', 'ssh-svn://foo'],
['ssh-svn://foo/bar/', 'ssh-svn://foo/bar/'],
['codereview.chromium.org', 'https://codereview.chromium.org'],
['codereview.chromium.org/', 'https://codereview.chromium.org/'],
['http://foo:10000', 'http://foo:10000'],
['http://foo:10000/bar', 'http://foo:10000/bar'],
['foo:10000', 'http://foo:10000'],
['foo:', 'https://foo:'],
]
for content, expected in values:
self.assertEquals(
expected, gclient_utils.UpgradeToHttps(content))
def testParseCodereviewSettingsContent(self):
values = [
['# bleh\n', {}],
['\t# foo : bar\n', {}],
['Foo:bar', {'Foo': 'bar'}],
['Foo:bar:baz\n', {'Foo': 'bar:baz'}],
[' Foo : bar ', {'Foo': 'bar'}],
[' Foo : bar \n', {'Foo': 'bar'}],
['a:b\n\rc:d\re:f', {'a': 'b', 'c': 'd', 'e': 'f'}],
['an_url:http://value/', {'an_url': 'http://value/'}],
[
'CODE_REVIEW_SERVER : http://r/s',
{'CODE_REVIEW_SERVER': 'https://r/s'}
],
['VIEW_VC:http://r/s', {'VIEW_VC': 'https://r/s'}],
]
for content, expected in values:
self.assertEquals(
expected, gclient_utils.ParseCodereviewSettingsContent(content))
if __name__ == '__main__':
import unittest
unittest.main()
# vim: ts=2:sw=2:tw=80:et:
| bsd-3-clause |
ojengwa/oh-mainline | vendor/packages/gdata/src/gdata/service.py | 78 | 69609 | #!/usr/bin/python
#
# Copyright (C) 2006,2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GDataService provides CRUD ops. and programmatic login for GData services.
Error: A base exception class for all exceptions in the gdata_client
module.
CaptchaRequired: This exception is thrown when a login attempt results in a
captcha challenge from the ClientLogin service. When this
exception is thrown, the captcha_token and captcha_url are
set to the values provided in the server's response.
BadAuthentication: Raised when a login attempt is made with an incorrect
username or password.
NotAuthenticated: Raised if an operation requiring authentication is called
before a user has authenticated.
NonAuthSubToken: Raised if a method to modify an AuthSub token is used when
the user is either not authenticated or is authenticated
through another authentication mechanism.
NonOAuthToken: Raised if a method to modify an OAuth token is used when the
user is either not authenticated or is authenticated through
another authentication mechanism.
RequestError: Raised if a CRUD request returned a non-success code.
UnexpectedReturnType: Raised if the response from the server was not of the
desired type. For example, this would be raised if the
server sent a feed when the client requested an entry.
GDataService: Encapsulates user credentials needed to perform insert, update
and delete operations with the GData API. An instance can
perform user authentication, query, insertion, deletion, and
update.
Query: Eases query URI creation by allowing URI parameters to be set as
dictionary attributes. For example a query with a feed of
'/base/feeds/snippets' and ['bq'] set to 'digital camera' will
produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is
called on it.
"""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import re
import urllib
import urlparse
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom.service
import gdata
import atom
import atom.http_interface
import atom.token_store
import gdata.auth
import gdata.gauth
AUTH_SERVER_HOST = 'https://www.google.com'
# When requesting an AuthSub token, it is often helpful to track the scope
# which is being requested. One way to accomplish this is to add a URL
# parameter to the 'next' URL which contains the requested scope. This
# constant is the default name (AKA key) for the URL parameter.
SCOPE_URL_PARAM_NAME = 'authsub_token_scope'
# When requesting an OAuth access token or authorization of an existing OAuth
# request token, it is often helpful to track the scope(s) which is/are being
# requested. One way to accomplish this is to add a URL parameter to the
# 'callback' URL which contains the requested scope. This constant is the
# default name (AKA key) for the URL parameter.
OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope'
# Maps the service names used in ClientLogin to scope URLs.
CLIENT_LOGIN_SCOPES = gdata.gauth.AUTH_SCOPES
# Default parameters for GDataService.GetWithRetries method
DEFAULT_NUM_RETRIES = 3
DEFAULT_DELAY = 1
DEFAULT_BACKOFF = 2
def lookup_scopes(service_name):
"""Finds the scope URLs for the desired service.
In some cases, an unknown service may be used, and in those cases this
function will return None.
"""
if service_name in CLIENT_LOGIN_SCOPES:
return CLIENT_LOGIN_SCOPES[service_name]
return None
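# For example (assuming gdata.gauth.AUTH_SCOPES maps the ClientLogin service
# name 'cl' to the Calendar scope URLs):
#   lookup_scopes('cl') # -> list of Calendar scope URLs
#   lookup_scopes('no-such-service') # -> None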
# Module level variable specifies which module should be used by GDataService
# objects to make HttpRequests. This setting can be overridden on each
# instance of GDataService.
# This module level variable is deprecated. Reassign the http_client member
# of a GDataService object instead.
http_request_handler = atom.service
class Error(Exception):
pass
class CaptchaRequired(Error):
pass
class BadAuthentication(Error):
pass
class NotAuthenticated(Error):
pass
class NonAuthSubToken(Error):
pass
class NonOAuthToken(Error):
pass
class RequestError(Error):
pass
class UnexpectedReturnType(Error):
pass
class BadAuthenticationServiceURL(Error):
pass
class FetchingOAuthRequestTokenFailed(RequestError):
pass
class TokenUpgradeFailed(RequestError):
pass
class RevokingOAuthTokenFailed(RequestError):
pass
class AuthorizationRequired(Error):
pass
class TokenHadNoScope(Error):
pass
class RanOutOfTries(Error):
pass
class GDataService(atom.service.AtomService):
"""Contains elements needed for GData login and CRUD request headers.
Maintains additional headers (tokens for example) needed for the GData
services to allow a user to perform inserts, updates, and deletes.
"""
# The handler member is deprecated, use http_client instead.
handler = None
# The auth_token member is deprecated, use the token_store instead.
auth_token = None
# The tokens dict is deprecated in favor of the token_store.
tokens = None
def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE',
service=None, auth_service_url=None, source=None, server=None,
additional_headers=None, handler=None, tokens=None,
http_client=None, token_store=None):
"""Creates an object of type GDataService.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
account_type: string (optional) The type of account to use. Use
'GOOGLE' for regular Google accounts or 'HOSTED' for Google
Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED
account first and, if it doesn't exist, try finding a regular
GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'.
service: string (optional) The desired service for which credentials
will be obtained.
auth_service_url: string (optional) User-defined auth token request URL
allows users to explicitly specify where to send auth token requests.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'base.google.com'.
additional_headers: dictionary (optional) Any additional headers which
should be included with CRUD operations.
handler: module (optional) This parameter is deprecated and has been
replaced by http_client.
tokens: This parameter is deprecated, calls should be made to
token_store instead.
http_client: An object responsible for making HTTP requests using a
request method. If none is provided, a new instance of
atom.http.ProxiedHttpClient will be used.
token_store: Keeps a collection of authorization tokens which can be
applied to requests for a specific URLs. Critical methods are
find_token based on a URL (atom.url.Url or a string), add_token,
and remove_token.
"""
atom.service.AtomService.__init__(self, http_client=http_client,
token_store=token_store)
self.email = email
self.password = password
self.account_type = account_type
self.service = service
self.auth_service_url = auth_service_url
self.server = server
self.additional_headers = additional_headers or {}
self._oauth_input_params = None
self.__SetSource(source)
self.__captcha_token = None
self.__captcha_url = None
self.__gsessionid = None
if http_request_handler.__name__ == 'gdata.urlfetch':
import gdata.alt.appengine
self.http_client = gdata.alt.appengine.AppEngineHttpClient()
def _SetSessionId(self, session_id):
"""Used in unit tests to simulate a 302 which sets a gsessionid."""
self.__gsessionid = session_id
# Define properties for GDataService
def _SetAuthSubToken(self, auth_token, scopes=None):
"""Deprecated, use SetAuthSubToken instead."""
self.SetAuthSubToken(auth_token, scopes=scopes)
def __SetAuthSubToken(self, auth_token, scopes=None):
"""Deprecated, use SetAuthSubToken instead."""
self._SetAuthSubToken(auth_token, scopes=scopes)
def _GetAuthToken(self):
"""Returns the auth token used for authenticating requests.
Returns:
string
"""
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if hasattr(token, 'auth_header'):
return token.auth_header
return None
def _GetCaptchaToken(self):
"""Returns a captcha token if the most recent login attempt generated one.
The captcha token is only set if the Programmatic Login attempt failed
because the Google service issued a captcha challenge.
Returns:
string
"""
return self.__captcha_token
def __GetCaptchaToken(self):
return self._GetCaptchaToken()
captcha_token = property(__GetCaptchaToken,
doc="""Get the captcha token for a login request.""")
def _GetCaptchaURL(self):
"""Returns the URL of the captcha image if a login attempt generated one.
The captcha URL is only set if the Programmatic Login attempt failed
because the Google service issued a captcha challenge.
Returns:
string
"""
return self.__captcha_url
def __GetCaptchaURL(self):
return self._GetCaptchaURL()
captcha_url = property(__GetCaptchaURL,
doc="""Get the captcha URL for a login request.""")
def GetGeneratorFromLinkFinder(self, link_finder, func,
num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY,
backoff=DEFAULT_BACKOFF):
"""returns a generator for pagination"""
yield link_finder
next = link_finder.GetNextLink()
while next is not None:
next_feed = func(str(self.GetWithRetries(
next.href, num_retries=num_retries, delay=delay, backoff=backoff)))
yield next_feed
next = next_feed.GetNextLink()
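# A hedged usage sketch: walk every page of a paginated feed; here
# gdata.GDataFeedFromString is one valid choice for the `func` parser.
#
#   for page in service.GetGeneratorFromLinkFinder(first_feed,
#                                                  gdata.GDataFeedFromString):
#     for entry in page.entry:
#       process(entry)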
def _GetElementGeneratorFromLinkFinder(self, link_finder, func,
num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY,
backoff=DEFAULT_BACKOFF):
# Yield the individual entries from each feed page; the generator returned
# by GetGeneratorFromLinkFinder has no 'entry' attribute itself.
for feed in self.GetGeneratorFromLinkFinder(link_finder, func,
num_retries=num_retries,
delay=delay,
backoff=backoff):
for element in feed.entry:
yield element
def GetOAuthInputParameters(self):
return self._oauth_input_params
def SetOAuthInputParameters(self, signature_method, consumer_key,
consumer_secret=None, rsa_key=None,
two_legged_oauth=False, requestor_id=None):
"""Sets parameters required for using OAuth authentication mechanism.
NOTE: Though consumer_secret and rsa_key are optional, either of the two
is required depending on the value of the signature_method.
Args:
signature_method: class which provides implementation for strategy class
oauth.oauth.OAuthSignatureMethod. Signature method to be used for
signing each request. Valid implementations are provided as the
constants defined by gdata.auth.OAuthSignatureMethod. Currently
they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
gdata.auth.OAuthSignatureMethod.HMAC_SHA1
consumer_key: string Domain identifying third_party web application.
consumer_secret: string (optional) Secret generated during registration.
Required only for HMAC_SHA1 signature method.
rsa_key: string (optional) Private key required for RSA_SHA1 signature
method.
two_legged_oauth: boolean (optional) Enables two-legged OAuth process.
requestor_id: string (optional) User email address to make requests on
their behalf. This parameter should only be set when two_legged_oauth
is True.
"""
self._oauth_input_params = gdata.auth.OAuthInputParams(
signature_method, consumer_key, consumer_secret=consumer_secret,
rsa_key=rsa_key, requestor_id=requestor_id)
if two_legged_oauth:
oauth_token = gdata.auth.OAuthToken(
oauth_input_params=self._oauth_input_params)
self.SetOAuthToken(oauth_token)
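# A hedged usage sketch (consumer values are placeholders): two-legged OAuth
# signs each request directly, with no separate token-approval step.
#
#   service.SetOAuthInputParameters(
#       gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
#       consumer_key='example.com', consumer_secret='secret',
#       two_legged_oauth=True, requestor_id='user@example.com')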
def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None,
request_url='%s/accounts/OAuthGetRequestToken' % \
AUTH_SERVER_HOST, oauth_callback=None):
"""Fetches and sets the OAuth request token and returns it.
Args:
scopes: string or list of string base URL(s) of the service(s) to be
accessed. If None, then this method tries to determine the
scope(s) from the current service.
extra_parameters: dict (optional) key-value pairs as any additional
parameters to be included in the URL and signature while making a
request for fetching an OAuth request token. All the OAuth parameters
are added by default. But if provided through this argument, any
default parameters will be overwritten. For e.g. a default parameter
oauth_version 1.0 can be overwritten if
extra_parameters = {'oauth_version': '2.0'}
request_url: Request token URL. The default is
'https://www.google.com/accounts/OAuthGetRequestToken'.
oauth_callback: str (optional) If set, it is assume the client is using
the OAuth v1.0a protocol where the callback url is sent in the
request token step. If the oauth_callback is also set in
extra_params, this value will override that one.
Returns:
The fetched request token as a gdata.auth.OAuthToken object.
Raises:
FetchingOAuthRequestTokenFailed if the server responded to the request
with an error.
"""
if scopes is None:
scopes = lookup_scopes(self.service)
if not isinstance(scopes, (list, tuple)):
scopes = [scopes,]
if oauth_callback:
if extra_parameters is not None:
extra_parameters['oauth_callback'] = oauth_callback
else:
extra_parameters = {'oauth_callback': oauth_callback}
request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl(
self._oauth_input_params, scopes,
request_token_url=request_url,
extra_parameters=extra_parameters)
response = self.http_client.request('GET', str(request_token_url))
if response.status == 200:
token = gdata.auth.OAuthToken()
token.set_token_string(response.read())
token.scopes = scopes
token.oauth_input_params = self._oauth_input_params
self.SetOAuthToken(token)
return token
error = {
'status': response.status,
'reason': 'Non 200 response on fetch request token',
'body': response.read()
}
raise FetchingOAuthRequestTokenFailed(error)
def SetOAuthToken(self, oauth_token):
"""Attempts to set the current token and add it to the token store.
The oauth_token can be any OAuth token i.e. unauthorized request token,
authorized request token or access token.
This method also attempts to add the token to the token store.
Use this method any time you want the current token to point to the
oauth_token passed. For example, call this method with the request token
you receive from FetchOAuthRequestToken.
Args:
oauth_token: gdata.auth.OAuthToken The OAuth token to be set as the current token.
"""
if self.auto_set_current_token:
self.current_token = oauth_token
if self.auto_store_tokens:
self.token_store.add_token(oauth_token)
def GenerateOAuthAuthorizationURL(
self, request_token=None, callback_url=None, extra_params=None,
include_scopes_in_callback=False,
scopes_param_prefix=OAUTH_SCOPE_URL_PARAM_NAME,
request_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST):
"""Generates URL at which user will login to authorize the request token.
Args:
request_token: gdata.auth.OAuthToken (optional) OAuth request token.
If not specified, then the current token will be used if it is of
type <gdata.auth.OAuthToken>, else it is found by looking in the
token_store by looking for a token for the current scope.
callback_url: string (optional) The URL user will be sent to after
logging in and granting access.
extra_params: dict (optional) Additional parameters to be sent.
include_scopes_in_callback: Boolean (default=False) if set to True, and
if 'callback_url' is present, the 'callback_url' will be modified to
include the scope(s) from the request token as a URL parameter. The
key for the 'callback' URL's scope parameter will be
OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
a parameter to the 'callback' URL, is that the page which receives
the OAuth token will be able to tell which URLs the token grants
access to.
scopes_param_prefix: string (default='oauth_token_scope') The URL
parameter key which maps to the list of valid scopes for the token.
This URL parameter will be included in the callback URL along with
the scopes of the token as value if include_scopes_in_callback=True.
request_url: Authorization URL. The default is
'https://www.google.com/accounts/OAuthAuthorizeToken'.
Returns:
A string URL at which the user is required to log in.
Raises:
NonOAuthToken if the user's request token is not an OAuth token or if a
request token was not available.
"""
if request_token and not isinstance(request_token, gdata.auth.OAuthToken):
raise NonOAuthToken
if not request_token:
if isinstance(self.current_token, gdata.auth.OAuthToken):
request_token = self.current_token
else:
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.OAuthToken):
request_token = token
if not request_token:
raise NonOAuthToken
return str(gdata.auth.GenerateOAuthAuthorizationUrl(
request_token,
authorization_url=request_url,
callback_url=callback_url, extra_params=extra_params,
include_scopes_in_callback=include_scopes_in_callback,
scopes_param_prefix=scopes_param_prefix))
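# Usage sketch (comments only): once a request token has been fetched and
# set as the current token, build the authorization URL and send the user's
# browser there. 'client' and the callback URL are placeholders.
#
# auth_url = client.GenerateOAuthAuthorizationURL(
# callback_url='http://example.com/oauth_callback')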
def UpgradeToOAuthAccessToken(self, authorized_request_token=None,
request_url='%s/accounts/OAuthGetAccessToken' \
% AUTH_SERVER_HOST, oauth_version='1.0',
oauth_verifier=None):
"""Upgrades the authorized request token to an access token and returns it
Args:
authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
token. If not specified, then the current token will be used if it is
of type <gdata.auth.OAuthToken>, else it is found by looking in the
token_store by looking for a token for the current scope.
request_url: Access token URL. The default is
'https://www.google.com/accounts/OAuthGetAccessToken'.
oauth_version: str (default='1.0') oauth_version parameter. All other
'oauth_' parameters are added by default. This parameter, too, is
added by default, but you can override its value here.
oauth_verifier: str (optional) If present, it is assumed that the client
will use the OAuth v1.0a protocol which includes passing the
oauth_verifier (as returned by the SP) in the access token step.
Returns:
Access token
Raises:
NonOAuthToken if the user's authorized request token is not an OAuth
token or if an authorized request token was not available.
TokenUpgradeFailed if the server responded to the request with an
error.
"""
if (authorized_request_token and
not isinstance(authorized_request_token, gdata.auth.OAuthToken)):
raise NonOAuthToken
if not authorized_request_token:
if isinstance(self.current_token, gdata.auth.OAuthToken):
authorized_request_token = self.current_token
else:
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.OAuthToken):
authorized_request_token = token
if not authorized_request_token:
raise NonOAuthToken
access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl(
authorized_request_token,
self._oauth_input_params,
access_token_url=request_url,
oauth_version=oauth_version,
oauth_verifier=oauth_verifier)
response = self.http_client.request('GET', str(access_token_url))
if response.status == 200:
token = gdata.auth.OAuthTokenFromHttpBody(response.read())
token.scopes = authorized_request_token.scopes
token.oauth_input_params = authorized_request_token.oauth_input_params
self.SetOAuthToken(token)
return token
else:
raise TokenUpgradeFailed({'status': response.status,
'reason': 'Non 200 response on upgrade',
'body': response.read()})
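# Usage sketch (comments only): after the user authorizes the request token
# and is redirected back, exchange it for an access token. The verifier
# value is read from the callback URL in OAuth 1.0a and is hypothetical here.
#
# access_token = client.UpgradeToOAuthAccessToken(
# oauth_verifier=verifier_from_callback)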
def RevokeOAuthToken(self, request_url='%s/accounts/AuthSubRevokeToken' % \
AUTH_SERVER_HOST):
"""Revokes an existing OAuth token.
request_url: Token revoke URL. The default is
'https://www.google.com/accounts/AuthSubRevokeToken'.
Raises:
NonOAuthToken if the user's auth token is not an OAuth token.
RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.OAuthToken):
raise NonOAuthToken
response = token.perform_request(self.http_client, 'GET', request_url,
headers={'Content-Type':'application/x-www-form-urlencoded'})
if response.status == 200:
self.token_store.remove_token(token)
else:
raise RevokingOAuthTokenFailed
def GetAuthSubToken(self):
"""Returns the AuthSub token as a string.
If the token is a gdata.auth.AuthSubToken, the Authorization Label
("AuthSub token") is removed.
This method examines the current_token to see if it is an AuthSubToken
or SecureAuthSubToken. If not, it searches the token_store for a token
which matches the current scope.
The current scope is determined by the service name string member.
Returns:
If the current_token is set to an AuthSubToken/SecureAuthSubToken,
return the token string. If there is no current_token, a token string
for a token which matches the service object's default scope is returned.
If there are no tokens valid for the scope, returns None.
"""
if isinstance(self.current_token, gdata.auth.AuthSubToken):
return self.current_token.get_token_string()
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.AuthSubToken):
return token.get_token_string()
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
return None
def SetAuthSubToken(self, token, scopes=None, rsa_key=None):
"""Sets the token sent in requests to an AuthSub token.
Sets the current_token and attempts to add the token to the token_store.
Only use this method if you have received a token from the AuthSub
service. The auth token is set automatically when UpgradeToSessionToken()
is used. See documentation for Google AuthSub here:
http://code.google.com/apis/accounts/AuthForWebApps.html
Args:
token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
The token returned by the AuthSub service. If the token is an
AuthSubToken or SecureAuthSubToken, the scope information stored in
the token is used. If the token is a string, the scopes parameter is
used to determine the valid scopes.
scopes: list of URLs for which the token is valid. This is only used
if the token parameter is a string.
rsa_key: string (optional) Private key required for RSA_SHA1 signature
method. This parameter is necessary if the token is a string
representing a secure token.
"""
if not isinstance(token, gdata.auth.AuthSubToken):
token_string = token
if rsa_key:
token = gdata.auth.SecureAuthSubToken(rsa_key)
else:
token = gdata.auth.AuthSubToken()
token.set_token_string(token_string)
# If no scopes were set for the token, use the scopes passed in, or
# try to determine the scopes based on the current service name. If
# all else fails, set the token to match all requests.
if not token.scopes:
if scopes is None:
scopes = lookup_scopes(self.service)
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
token.scopes = scopes
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
self.token_store.add_token(token)
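# Usage sketch (comments only): installing a raw AuthSub token string and
# the scopes it is valid for; the token and scope values are placeholders.
#
# client.SetAuthSubToken('raw-authsub-token-string',
# scopes=['http://www.google.com/calendar/feeds/'])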
def GetClientLoginToken(self):
"""Returns the token string for the current token or a token matching the
service scope.
If the current_token is a ClientLoginToken, the token string for
the current token is returned. If the current_token is not set, this method
searches for a token in the token_store which is valid for the service
object's current scope.
The current scope is determined by the service name string member.
The token string is the end of the Authorization header; it does not
include the ClientLogin label.
"""
if isinstance(self.current_token, gdata.auth.ClientLoginToken):
return self.current_token.get_token_string()
current_scopes = lookup_scopes(self.service)
if current_scopes:
token = self.token_store.find_token(current_scopes[0])
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if isinstance(token, gdata.auth.ClientLoginToken):
return token.get_token_string()
return None
def SetClientLoginToken(self, token, scopes=None):
"""Sets the token sent in requests to a ClientLogin token.
This method sets the current_token to a new ClientLoginToken and it
also attempts to add the ClientLoginToken to the token_store.
Only use this method if you have received a token from the ClientLogin
service. The auth_token is set automatically when ProgrammaticLogin()
is used. See documentation for Google ClientLogin here:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
Args:
token: string or instance of a ClientLoginToken.
"""
if not isinstance(token, gdata.auth.ClientLoginToken):
token_string = token
token = gdata.auth.ClientLoginToken()
token.set_token_string(token_string)
if not token.scopes:
if scopes is None:
scopes = lookup_scopes(self.service)
if scopes is None:
scopes = [atom.token_store.SCOPE_ALL]
token.scopes = scopes
if self.auto_set_current_token:
self.current_token = token
if self.auto_store_tokens:
self.token_store.add_token(token)
# Private methods to create the source property.
def __GetSource(self):
return self.__source
def __SetSource(self, new_source):
self.__source = new_source
# Update the UserAgent header to include the new application name.
self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
self.__source,)
source = property(__GetSource, __SetSource,
doc="""The source is the name of the application making the request.
It should be in the form company_id-app_name-app_version""")
# Authentication operations
def ProgrammaticLogin(self, captcha_token=None, captcha_response=None):
"""Authenticates the user and sets the GData Auth token.
Login retrieves a temporary auth token which must be used with all
requests to GData services. The auth token is stored in the GData client
object.
Login is also used to respond to a captcha challenge. If the user's login
attempt failed with a CaptchaRequired error, the user can respond by
calling Login with the captcha token and the answer to the challenge.
Args:
captcha_token: string (optional) The identifier for the captcha challenge
which was presented to the user.
captcha_response: string (optional) The user's answer to the captcha
challenge.
Raises:
CaptchaRequired if the login service will require a captcha response
BadAuthentication if the login service rejected the username or password
Error if the login service responded with a 403 different from the above
"""
request_body = gdata.auth.generate_client_login_request_body(self.email,
self.password, self.service, self.source, self.account_type,
captcha_token, captcha_response)
# If the user has defined their own authentication service URL,
# send the ClientLogin requests to this URL:
if not self.auth_service_url:
auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin'
else:
auth_request_url = self.auth_service_url
auth_response = self.http_client.request('POST', auth_request_url,
data=request_body,
headers={'Content-Type':'application/x-www-form-urlencoded'})
response_body = auth_response.read()
if auth_response.status == 200:
# TODO: insert the token into the token_store directly.
self.SetClientLoginToken(
gdata.auth.get_client_login_token(response_body))
self.__captcha_token = None
self.__captcha_url = None
elif auth_response.status == 403:
# Examine each line to find the error type and the captcha token and
# captcha URL if they are present.
captcha_parameters = gdata.auth.get_captcha_challenge(response_body,
captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST)
if captcha_parameters:
self.__captcha_token = captcha_parameters['token']
self.__captcha_url = captcha_parameters['url']
raise CaptchaRequired, 'Captcha Required'
elif response_body.splitlines()[0] == 'Error=BadAuthentication':
self.__captcha_token = None
self.__captcha_url = None
raise BadAuthentication, 'Incorrect username or password'
else:
self.__captcha_token = None
self.__captcha_url = None
raise Error, 'Server responded with a 403 code'
elif auth_response.status == 302:
self.__captcha_token = None
self.__captcha_url = None
# Google tries to redirect all bad URLs back to
# http://www.google.<locale>. If a redirect
# attempt is made, assume the user has supplied an incorrect authentication URL
raise BadAuthenticationServiceURL, 'Server responded with a 302 code.'
def ClientLogin(self, username, password, account_type=None, service=None,
auth_service_url=None, source=None, captcha_token=None,
captcha_response=None):
"""Convenience method for authenticating using ProgrammaticLogin.
Sets values for email, password, and other optional members.
Args:
username:
password:
account_type: string (optional)
service: string (optional)
auth_service_url: string (optional)
captcha_token: string (optional)
captcha_response: string (optional)
"""
self.email = username
self.password = password
if account_type:
self.account_type = account_type
if service:
self.service = service
if source:
self.source = source
if auth_service_url:
self.auth_service_url = auth_service_url
self.ProgrammaticLogin(captcha_token, captcha_response)
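# Usage sketch (comments only): a typical ClientLogin call; the credentials
# and source string are placeholders.
#
# client.ClientLogin('user@example.com', 'password',
# source='exampleCo-exampleApp-1.0')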
def GenerateAuthSubURL(self, next, scope, secure=False, session=True,
domain='default'):
"""Generate a URL at which the user will login and be redirected back.
Users enter their credentials on a Google login page and a token is sent
to the URL specified in next. See documentation for AuthSub login at:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
next: string The URL user will be sent to after logging in.
scope: string or list of strings. The URLs of the services to be
accessed.
secure: boolean (optional) Determines whether or not the issued token
is a secure token.
session: boolean (optional) Determines whether or not the issued token
can be upgraded to a session token.
"""
if not isinstance(scope, (list, tuple)):
scope = (scope,)
return gdata.auth.generate_auth_sub_url(next, scope, secure=secure,
session=session,
request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST,
domain=domain)
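# Usage sketch (comments only): build the AuthSub login URL and redirect
# the user's browser to it; 'next' must be a page the application controls.
#
# login_url = client.GenerateAuthSubURL(
# 'http://example.com/authsub_return',
# 'http://www.google.com/calendar/feeds/', secure=False, session=True)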
def UpgradeToSessionToken(self, token=None):
"""Upgrades a single use AuthSub token to a session token.
Args:
token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
(optional) which is good for a single use but can be upgraded
to a session token. If no token is passed in, the token
is found by looking in the token_store by looking for a token
for the current scope.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
TokenUpgradeFailed if the server responded to the request with an
error.
"""
if token is None:
scopes = lookup_scopes(self.service)
if scopes:
token = self.token_store.find_token(scopes[0])
else:
token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
self.SetAuthSubToken(self.upgrade_to_session_token(token))
def upgrade_to_session_token(self, token):
"""Upgrades a single use AuthSub token to a session token.
Args:
token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
which is good for a single use but can be upgraded to a
session token.
Returns:
The upgraded token as a gdata.auth.AuthSubToken object.
Raises:
TokenUpgradeFailed if the server responded to the request with an
error.
"""
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken',
headers={'Content-Type':'application/x-www-form-urlencoded'})
response_body = response.read()
if response.status == 200:
token.set_token_string(
gdata.auth.token_from_http_body(response_body))
return token
else:
raise TokenUpgradeFailed({'status': response.status,
'reason': 'Non 200 response on upgrade',
'body': response_body})
def RevokeAuthSubToken(self):
"""Revokes an existing AuthSub token.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken',
headers={'Content-Type':'application/x-www-form-urlencoded'})
if response.status == 200:
self.token_store.remove_token(token)
def AuthSubTokenInfo(self):
"""Fetches the AuthSub token's metadata from the server.
Raises:
NonAuthSubToken if the user's auth token is not an AuthSub token
"""
scopes = lookup_scopes(self.service)
token = self.token_store.find_token(scopes[0])
if not isinstance(token, gdata.auth.AuthSubToken):
raise NonAuthSubToken
response = token.perform_request(self.http_client, 'GET',
AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo',
headers={'Content-Type':'application/x-www-form-urlencoded'})
result_body = response.read()
if response.status == 200:
return result_body
else:
raise RequestError, {'status': response.status,
'body': result_body}
def GetWithRetries(self, uri, extra_headers=None, redirects_remaining=4,
encoding='UTF-8', converter=None, num_retries=DEFAULT_NUM_RETRIES,
delay=DEFAULT_DELAY, backoff=DEFAULT_BACKOFF, logger=None):
"""This is a wrapper method for Get with retrying capability.
It retries the request a specified number of times to avoid transient
errors while retrieving bulk entities.
Note this method relies on the time module and so may not be usable
by default in Python2.2.
Args:
num_retries: Integer; the retry count.
delay: Integer; the initial delay for retrying.
backoff: Integer; how much the delay should lengthen after each failure.
logger: An object which has a debug(str) method to receive logging
messages. Recommended that you pass in the logging module.
Raises:
ValueError if any of the parameters has an invalid value.
RanOutOfTries on failure after number of retries.
"""
# Moved import for time module inside this method since time is not a
# default module in Python2.2. This method will not be usable in
# Python2.2.
import time
if backoff <= 1:
raise ValueError("backoff must be greater than 1")
num_retries = int(num_retries)
if num_retries < 0:
raise ValueError("num_retries must be 0 or greater")
if delay <= 0:
raise ValueError("delay must be greater than 0")
# Let's start
mtries, mdelay = num_retries, delay
while mtries > 0:
if mtries != num_retries:
if logger:
logger.debug("Retrying: %s" % uri)
try:
rv = self.Get(uri, extra_headers=extra_headers,
redirects_remaining=redirects_remaining,
encoding=encoding, converter=converter)
except SystemExit:
# Allow this error
raise
except RequestError, e:
# Error 500 is 'internal server error' and warrants a retry
# Error 503 is 'service unavailable' and warrants a retry
if e[0]['status'] not in [500, 503]:
raise e
# Else, fall through to the retry code...
except Exception, e:
if logger:
logger.debug(e)
# Fall through to the retry code...
else:
# This is the right path.
return rv
mtries -= 1
time.sleep(mdelay)
mdelay *= backoff
raise RanOutOfTries('Ran out of tries.')
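# Usage sketch (comments only): retrying a bulk fetch with exponential
# backoff, routing retry messages through the standard logging module.
# 'client' and the query URI are placeholders.
#
# import logging
# feed = client.GetWithRetries('/base/feeds/snippets?bq=digital+camera',
# num_retries=3, delay=1, backoff=2, logger=logging)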
# CRUD operations
def Get(self, uri, extra_headers=None, redirects_remaining=4,
encoding='UTF-8', converter=None):
"""Query the GData API with the given URI
The uri is the portion of the URI after the server value
(ex: www.google.com).
To perform a query against Google Base, set the server to
'base.google.com' and set the uri to '/base/feeds/...', where ... is
your query. For example, to find snippets for all digital cameras uri
should be set to: '/base/feeds/snippets?bq=digital+camera'
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
redirects_remaining: int (optional) Tracks the number of additional
redirects this method will allow. If the service object receives
a redirect and remaining is 0, it will not follow the redirect.
This was added to avoid infinite redirect loops.
encoding: string (optional) The character encoding for the server's
response. Default is UTF-8
converter: func (optional) A function which will transform
the server's results before it is returned. Example: use
GDataFeedFromString to parse the server response as if it
were a GDataFeed.
Returns:
If there is no ResultsTransformer specified in the call, a GDataFeed
or GDataEntry depending on which is sent from the server. If the
response is neither a feed nor an entry and there is no ResultsTransformer,
return a string. If there is a ResultsTransformer, the returned value
will be that of the ResultsTransformer function.
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if uri.find('?') > -1:
uri += '&gsessionid=%s' % (self.__gsessionid,)
else:
uri += '?gsessionid=%s' % (self.__gsessionid,)
server_response = self.request('GET', uri,
headers=extra_headers)
result_body = server_response.read()
if server_response.status == 200:
if converter:
return converter(result_body)
# There was no ResultsTransformer specified, so try to convert the
# server's response into a GDataFeed.
feed = gdata.GDataFeedFromString(result_body)
if not feed:
# If conversion to a GDataFeed failed, try to convert the server's
# response to a GDataEntry.
entry = gdata.GDataEntryFromString(result_body)
if not entry:
# The server's response wasn't a feed, or an entry, so return the
# response body as a string.
return result_body
return entry
return feed
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.Get(self, location, extra_headers, redirects_remaining - 1,
encoding=encoding, converter=converter)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
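# Usage sketch (comments only): a plain GET that auto-detects feed vs.
# entry, and a GET with an explicit converter; the entry URI is a
# placeholder.
#
# feed = client.Get('/base/feeds/snippets?bq=digital+camera')
# entry = client.Get('/base/feeds/snippets/1234',
# converter=gdata.GDataEntryFromString)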
def GetMedia(self, uri, extra_headers=None):
"""Returns a MediaSource containing media and its metadata from the given
URI string.
"""
response_handle = self.request('GET', uri,
headers=extra_headers)
return gdata.MediaSource(response_handle, response_handle.getheader(
'Content-Type'),
response_handle.getheader('Content-Length'))
def GetEntry(self, uri, extra_headers=None):
"""Query the GData API with the given URI and receive an Entry.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataEntry built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers,
converter=atom.EntryFromString)
if isinstance(result, atom.Entry):
return result
else:
raise UnexpectedReturnType, 'Server did not send an entry'
def GetFeed(self, uri, extra_headers=None,
converter=gdata.GDataFeedFromString):
"""Query the GData API with the given URI and receive a Feed.
See also documentation for gdata.service.Get
Args:
uri: string The query in the form of a URI. Example:
'/base/feeds/snippets?bq=digital+camera'.
extra_headers: dictionary (optional) Extra HTTP headers to be included
in the GET request. These headers are in addition to
those stored in the client's additional_headers property.
The client automatically sets the Content-Type and
Authorization headers.
Returns:
A GDataFeed built from the XML in the server's response.
"""
result = GDataService.Get(self, uri, extra_headers, converter=converter)
if isinstance(result, atom.Feed):
return result
else:
raise UnexpectedReturnType, 'Server did not send a feed'
def GetNext(self, feed):
"""Requests the next 'page' of results in the feed.
This method uses the feed's next link to request an additional feed
and uses the class of the feed to convert the results of the GET request.
Args:
feed: atom.Feed or a subclass. The feed should contain a next link and
the type of the feed will be applied to the results from the
server. The new feed which is returned will be of the same class
as this feed which was passed in.
Returns:
A new feed representing the next set of results in the server's feed.
The type of this feed will match that of the feed argument.
"""
next_link = feed.GetNextLink()
# Create a closure which will convert an XML string to the class of
# the feed object passed in.
def ConvertToFeedClass(xml_string):
return atom.CreateClassFromXMLString(feed.__class__, xml_string)
# Make a GET request on the next link and use the above closure as the
# converter, which processes the XML string from the server.
if next_link and next_link.href:
return GDataService.Get(self, next_link.href,
converter=ConvertToFeedClass)
else:
return None
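# Usage sketch (comments only): paging through a feed by following 'next'
# links until GetNext returns None. 'client' is a placeholder.
#
# feed = client.GetFeed('/base/feeds/snippets?bq=digital+camera')
# while feed:
# for entry in feed.entry:
# pass # process each entry here
# feed = client.GetNext(feed)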
def Post(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert or update data into a GData service at the given URI.
Args:
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the post succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'POST', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
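# Usage sketch (comments only): inserting a new entry into a feed; 'entry'
# is a placeholder GDataEntry and the converter parses the server response.
#
# created = client.Post(entry, '/base/feeds/items',
# converter=gdata.GDataEntryFromString)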
def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert data into a GData service at the given URI.
Args:
verb: string, either 'POST' or 'PUT'
data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/base/feeds/items'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the post succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if url_params is None:
url_params = {}
url_params['gsessionid'] = self.__gsessionid
if data and media_source:
if ElementTree.iselement(data):
data_str = ElementTree.tostring(data)
else:
data_str = str(data)
multipart = []
multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \
'Content-Type: application/atom+xml\r\n\r\n')
multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \
media_source.content_type+'\r\n\r\n')
multipart.append('\r\n--END_OF_PART--\r\n')
extra_headers['MIME-version'] = '1.0'
extra_headers['Content-Length'] = str(len(multipart[0]) +
len(multipart[1]) + len(multipart[2]) +
len(data_str) + media_source.content_length)
extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART'
server_response = self.request(verb, uri,
data=[multipart[0], data_str, multipart[1], media_source.file_handle,
multipart[2]], headers=extra_headers, url_params=url_params)
result_body = server_response.read()
elif media_source or isinstance(data, gdata.MediaSource):
if isinstance(data, gdata.MediaSource):
media_source = data
extra_headers['Content-Length'] = str(media_source.content_length)
extra_headers['Content-Type'] = media_source.content_type
server_response = self.request(verb, uri,
data=media_source.file_handle, headers=extra_headers,
url_params=url_params)
result_body = server_response.read()
else:
http_data = data
if 'Content-Type' not in extra_headers:
content_type = 'application/atom+xml'
extra_headers['Content-Type'] = content_type
server_response = self.request(verb, uri, data=http_data,
headers=extra_headers, url_params=url_params)
result_body = server_response.read()
# Server returns 201 for most post requests, but when performing a batch
# request the server responds with a 200 on success.
if server_response.status == 201 or server_response.status == 200:
if converter:
return converter(result_body)
feed = gdata.GDataFeedFromString(result_body)
if not feed:
entry = gdata.GDataEntryFromString(result_body)
if not entry:
return result_body
return entry
return feed
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.PostOrPut(self, verb, data, location,
extra_headers, url_params, escape_params,
redirects_remaining - 1, media_source, converter=converter)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
def Put(self, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=3, media_source=None,
converter=None):
"""Updates an entry at the given URI.
Args:
data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
XML containing the updated data.
uri: string A URI indicating entry to which the update will be applied.
Example: '/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
GDataEntryFromString which will parse the body of the server's
response and return a GDataEntry.
Returns:
If the put succeeded, this method will return a GDataFeed, GDataEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
return GDataService.PostOrPut(self, 'PUT', data, uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params, redirects_remaining=redirects_remaining,
media_source=media_source, converter=converter)
def Delete(self, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4):
"""Deletes the entry at the given URI.
Args:
uri: string The URI of the entry to be deleted. Example:
'/base/feeds/items/ITEM-ID'
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type and
Authorization headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&dict_key=value&...'.
Example: {'max-results': '250'} becomes &max-results=250
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
Returns:
True if the entry was deleted.
"""
if extra_headers is None:
extra_headers = {}
if self.__gsessionid is not None:
if uri.find('gsessionid=') < 0:
if url_params is None:
url_params = {}
url_params['gsessionid'] = self.__gsessionid
server_response = self.request('DELETE', uri,
headers=extra_headers, url_params=url_params)
result_body = server_response.read()
if server_response.status == 200:
return True
elif server_response.status == 302:
if redirects_remaining > 0:
location = (server_response.getheader('Location')
or server_response.getheader('location'))
if location is not None:
m = re.compile('[\?\&]gsessionid=(\w*\-)').search(location)
if m is not None:
self.__gsessionid = m.group(1)
return GDataService.Delete(self, location, extra_headers,
url_params, escape_params, redirects_remaining - 1)
else:
raise RequestError, {'status': server_response.status,
'reason': '302 received without Location header',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': 'Redirect received, but redirects_remaining <= 0',
'body': result_body}
else:
raise RequestError, {'status': server_response.status,
'reason': server_response.reason, 'body': result_body}
def ExtractToken(url, scopes_included_in_next=True):
"""Gets the AuthSub token from the current page's URL.
Designed to be used on the URL that the browser is sent to after the user
authorizes this application at the page given by GenerateAuthSubRequestUrl.
Args:
url: The current page's URL. It should contain the token as a URL
parameter. Example: 'http://example.com/?...&token=abcd435'
scopes_included_in_next: If True, this function looks for a scope value
associated with the token. The scope is a URL parameter with the
key set to SCOPE_URL_PARAM_NAME. This parameter should be present
if the AuthSub request URL was generated using
GenerateAuthSubRequestUrl with include_scope_in_next set to True.
Returns:
A tuple containing the token string and a list of scope strings for which
this token should be valid. If the scope was not included in the URL, the
tuple will contain (token, None).
"""
parsed = urlparse.urlparse(url)
token = gdata.auth.AuthSubTokenFromUrl(parsed[4])
scopes = ''
if scopes_included_in_next:
for pair in parsed[4].split('&'):
if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME):
scopes = urllib.unquote_plus(pair.split('=')[1])
return (token, scopes.split(' '))
def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False,
session=True, request_url='https://www.google.com/accounts/AuthSubRequest',
include_scopes_in_next=True):
"""Creates a URL to request an AuthSub token to access Google services.
For more details on AuthSub, see the documentation here:
http://code.google.com/apis/accounts/docs/AuthSub.html
Args:
next: The URL where the browser should be sent after the user authorizes
the application. This page is responsible for receiving the token
which is embedded in the URL as a parameter.
scopes: The base URL to which access will be granted. Example:
'http://www.google.com/calendar/feeds' will grant access to all
URLs in the Google Calendar data API. If you would like a token for
multiple scopes, pass in a list of URL strings.
hd: The domain to which the user's account belongs. This is set to the
domain name if you are using Google Apps. Example: 'example.org'
Defaults to 'default'
secure: If set to True, all requests should be signed. The default is
False.
session: If set to True, the token received by the 'next' URL can be
upgraded to a multiuse session token. If session is set to False, the
token may only be used once and cannot be upgraded. Default is True.
request_url: The base of the URL to which the user will be sent to
authorize this application to access their data. The default is
'https://www.google.com/accounts/AuthSubRequest'.
include_scopes_in_next: Boolean if set to true, the 'next' parameter will
be modified to include the requested scope as a URL parameter. The
key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The
benefit of including the scope URL as a parameter to the next URL, is
that the page which receives the AuthSub token will be able to tell
which URLs the token grants access to.
Returns:
A URL string to which the browser should be sent.
"""
if isinstance(scopes, list):
scope = ' '.join(scopes)
else:
scope = scopes
if include_scopes_in_next:
if next.find('?') > -1:
next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope})
else:
next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope})
return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure,
session=session, request_url=request_url, domain=hd)
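# Usage sketch (comments only): the two module-level AuthSub helpers used
# together; the URLs and 'current_page_url' are placeholders.
#
# url = GenerateAuthSubRequestUrl('http://example.com/authsub_return',
# 'http://www.google.com/calendar/feeds/')
# ...user logs in, browser returns to the 'next' URL...
# token, scopes = ExtractToken(current_page_url)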
class Query(dict):
"""Constructs a query URL to be used in GET requests
Url parameters are created by adding key-value pairs to this object as a
dict. For example, to add &max-results=25 to the URL do
my_query['max-results'] = 25
Category queries are created by adding category strings to the categories
member. All items in the categories list will be concatenated with the /
symbol (symbolizing a category x AND y restriction). If you would like to
OR two categories, append them as one string with a | between them.
For example, do query.categories.append('Fritz|Laurie') to create a query
like this feed/-/Fritz%7CLaurie . This query will look for results in both
categories.
"""
def __init__(self, feed=None, text_query=None, params=None,
categories=None):
"""Constructor for Query
Args:
feed: str (optional) The path for the feed (Examples:
'/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
text_query: str (optional) The contents of the q query parameter. The
contents of the text_query are URL escaped upon conversion to a URI.
params: dict (optional) Parameter value string pairs which become URL
params when translated to a URI. These parameters are added to the
query's items (key-value pairs).
categories: list (optional) List of category strings which should be
included as query categories. See
http://code.google.com/apis/gdata/reference.html#Queries for
details. If you want to get results from category A or B (both
categories), specify a single list item 'A|B'.
"""
self.feed = feed
self.categories = []
if text_query:
self.text_query = text_query
if isinstance(params, dict):
for param in params:
self[param] = params[param]
if isinstance(categories, list):
for category in categories:
self.categories.append(category)
def _GetTextQuery(self):
if 'q' in self.keys():
return self['q']
else:
return None
def _SetTextQuery(self, query):
self['q'] = query
text_query = property(_GetTextQuery, _SetTextQuery,
doc="""The feed query's q parameter""")
def _GetAuthor(self):
if 'author' in self.keys():
return self['author']
else:
return None
def _SetAuthor(self, query):
self['author'] = query
author = property(_GetAuthor, _SetAuthor,
doc="""The feed query's author parameter""")
def _GetAlt(self):
if 'alt' in self.keys():
return self['alt']
else:
return None
def _SetAlt(self, query):
self['alt'] = query
alt = property(_GetAlt, _SetAlt,
doc="""The feed query's alt parameter""")
def _GetUpdatedMin(self):
if 'updated-min' in self.keys():
return self['updated-min']
else:
return None
def _SetUpdatedMin(self, query):
self['updated-min'] = query
updated_min = property(_GetUpdatedMin, _SetUpdatedMin,
doc="""The feed query's updated-min parameter""")
def _GetUpdatedMax(self):
if 'updated-max' in self.keys():
return self['updated-max']
else:
return None
def _SetUpdatedMax(self, query):
self['updated-max'] = query
updated_max = property(_GetUpdatedMax, _SetUpdatedMax,
doc="""The feed query's updated-max parameter""")
def _GetPublishedMin(self):
if 'published-min' in self.keys():
return self['published-min']
else:
return None
def _SetPublishedMin(self, query):
self['published-min'] = query
published_min = property(_GetPublishedMin, _SetPublishedMin,
doc="""The feed query's published-min parameter""")
def _GetPublishedMax(self):
if 'published-max' in self.keys():
return self['published-max']
else:
return None
def _SetPublishedMax(self, query):
self['published-max'] = query
published_max = property(_GetPublishedMax, _SetPublishedMax,
doc="""The feed query's published-max parameter""")
def _GetStartIndex(self):
if 'start-index' in self.keys():
return self['start-index']
else:
return None
def _SetStartIndex(self, query):
if not isinstance(query, str):
query = str(query)
self['start-index'] = query
start_index = property(_GetStartIndex, _SetStartIndex,
doc="""The feed query's start-index parameter""")
def _GetMaxResults(self):
if 'max-results' in self.keys():
return self['max-results']
else:
return None
def _SetMaxResults(self, query):
if not isinstance(query, str):
query = str(query)
self['max-results'] = query
max_results = property(_GetMaxResults, _SetMaxResults,
doc="""The feed query's max-results parameter""")
def _GetOrderBy(self):
if 'orderby' in self.keys():
return self['orderby']
else:
return None
def _SetOrderBy(self, query):
self['orderby'] = query
orderby = property(_GetOrderBy, _SetOrderBy,
doc="""The feed query's orderby parameter""")
def ToUri(self):
q_feed = self.feed or ''
category_string = '/'.join(
[urllib.quote_plus(c) for c in self.categories])
# Add categories to the feed if there are any.
if len(self.categories) > 0:
q_feed = q_feed + '/-/' + category_string
return atom.service.BuildUri(q_feed, self)
def __str__(self):
return self.ToUri()
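# Usage sketch (comments only): building a query URI with a text query,
# a URL parameter, and a category; the values are placeholders.
#
# query = Query(feed='/base/feeds/snippets')
# query.text_query = 'digital camera'
# query['max-results'] = '25'
# query.categories.append('products')
# uri = query.ToUri() # e.g. '/base/feeds/snippets/-/products?...'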
| agpl-3.0 |
thica/ORCA-Remote | src/ORCA/utils/Platform/generic/generic_CheckPermissions.py | 1 | 1299 | # -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me at: http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# from ORCA.utils.Platform import OS_GetUserDataPath
from ORCA.utils.Platform import OS_GetUserDownloadsDataPath
import ORCA.Globals as Globals
def CheckPermissions() -> bool:
""" We assume to have all permissions, as long we do not have OS specific code"""
if Globals.oPathUserDownload is not None:
return Globals.oPathUserDownload.IsWriteable()
else:
return OS_GetUserDownloadsDataPath().IsWriteable()
| gpl-3.0 |
deveninfotech/deven-frappe | frappe/exceptions.py | 31 | 1658 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# BEWARE don't put anything in this file except exceptions
from werkzeug.exceptions import NotFound
from MySQLdb import ProgrammingError as SQLError
class ValidationError(Exception):
http_status_code = 417
class AuthenticationError(Exception):
http_status_code = 401
class PermissionError(Exception):
http_status_code = 403
class DoesNotExistError(ValidationError):
http_status_code = 404
class NameError(Exception):
http_status_code = 409
class OutgoingEmailError(Exception):
http_status_code = 501
class SessionStopped(Exception):
http_status_code = 503
class UnsupportedMediaType(Exception):
http_status_code = 415
class DuplicateEntryError(NameError):pass
class DataError(ValidationError): pass
class UnknownDomainError(Exception): pass
class MappingMismatchError(ValidationError): pass
class InvalidStatusError(ValidationError): pass
class MandatoryError(ValidationError): pass
class InvalidSignatureError(ValidationError): pass
class RateLimitExceededError(ValidationError): pass
class CannotChangeConstantError(ValidationError): pass
class UpdateAfterSubmitError(ValidationError): pass
class LinkValidationError(ValidationError): pass
class CancelledLinkError(LinkValidationError): pass
class DocstatusTransitionError(ValidationError): pass
class TimestampMismatchError(ValidationError): pass
class EmptyTableError(ValidationError): pass
class LinkExistsError(ValidationError): pass
class InvalidEmailAddressError(ValidationError): pass
class TemplateNotFoundError(ValidationError): pass
| mit |
chrisbarber/dask | dask/tests/test_threaded.py | 3 | 2157 | from multiprocessing.pool import ThreadPool
import threading
from threading import Thread
from time import time, sleep
import pytest
from dask.context import set_options
from dask.threaded import get
from dask.utils_test import inc, add
def test_get():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
assert get(dsk, 'w') == 4
assert get(dsk, ['w', 'z']) == (4, 2)
def test_nested_get():
dsk = {'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}
assert get(dsk, ['a', 'b']) == (3, 3)
def test_get_without_computation():
dsk = {'x': 1}
assert get(dsk, 'x') == 1
def bad(x):
raise ValueError()
def test_exceptions_rise_to_top():
dsk = {'x': 1, 'y': (bad, 'x')}
pytest.raises(ValueError, lambda: get(dsk, 'y'))
def test_reuse_pool():
pool = ThreadPool()
with set_options(pool=pool):
assert get({'x': (inc, 1)}, 'x') == 2
assert get({'x': (inc, 1)}, 'x') == 2
def test_threaded_within_thread():
L = []
def f(i):
result = get({'x': (lambda: i,)}, 'x', num_workers=2)
L.append(result)
before = threading.active_count()
for i in range(20):
t = Thread(target=f, args=(1,))
t.daemon = True
t.start()
t.join()
assert L == [1]
del L[:]
start = time() # wait for most threads to join
while threading.active_count() > before + 10:
sleep(0.01)
assert time() < start + 5
def test_dont_spawn_too_many_threads():
before = threading.active_count()
dsk = {('x', i): (lambda: i,) for i in range(10)}
dsk['x'] = (sum, list(dsk))
for i in range(20):
get(dsk, 'x', num_workers=4)
after = threading.active_count()
assert after <= before + 8
def test_thread_safety():
def f(x):
return 1
dsk = {'x': (sleep, 0.05), 'y': (f, 'x')}
L = []
def test_f():
L.append(get(dsk, 'y'))
threads = []
for i in range(20):
t = Thread(target=test_f)
t.daemon = True
t.start()
threads.append(t)
for thread in threads:
thread.join()
assert L == [1] * 20
| bsd-3-clause |
ThiagoGarciaAlves/intellij-community | plugins/hg4idea/testData/bin/mercurial/localrepo.py | 90 | 102052 | # localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache
class repofilecache(filecache):
"""All filecache usage on repo are done for logic that should be unfiltered
"""
def __get__(self, repo, type=None):
return super(repofilecache, self).__get__(repo.unfiltered(), type)
def __set__(self, repo, value):
return super(repofilecache, self).__set__(repo.unfiltered(), value)
def __delete__(self, repo):
return super(repofilecache, self).__delete__(repo.unfiltered())
class storecache(repofilecache):
"""filecache for files in the store"""
def join(self, obj, fname):
return obj.sjoin(fname)
class unfilteredpropertycache(propertycache):
"""propertycache that apply to unfiltered repo only"""
def __get__(self, repo, type=None):
return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
class filteredpropertycache(propertycache):
"""propertycache that must take filtering in account"""
def cachevalue(self, obj, value):
object.__setattr__(obj, self.name, value)
def hasunfilteredcache(repo, name):
"""check if a repo has an unfilteredpropertycache value for <name>"""
return name in vars(repo.unfiltered())
def unfilteredmethod(orig):
"""decorate method that always need to be run on unfiltered version"""
def wrapper(repo, *args, **kwargs):
return orig(repo.unfiltered(), *args, **kwargs)
return wrapper
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
class localpeer(peer.peerrepository):
'''peer for a local repo; reflects only the most recent API'''
def __init__(self, repo, caps=MODERNCAPS):
peer.peerrepository.__init__(self)
self._repo = repo.filtered('served')
self.ui = repo.ui
self._caps = repo._restrictcapabilities(caps)
self.requirements = repo.requirements
self.supportedformats = repo.supportedformats
def close(self):
self._repo.close()
def _capabilities(self):
return self._caps
def local(self):
return self._repo
def canpush(self):
return True
def url(self):
return self._repo.url()
def lookup(self, key):
return self._repo.lookup(key)
def branchmap(self):
return self._repo.branchmap()
def heads(self):
return self._repo.heads()
def known(self, nodes):
return self._repo.known(nodes)
def getbundle(self, source, heads=None, common=None):
return self._repo.getbundle(source, heads=heads, common=common)
# TODO We might want to move the next two calls into legacypeer and add
# unbundle instead.
def lock(self):
return self._repo.lock()
def addchangegroup(self, cg, source, url):
return self._repo.addchangegroup(cg, source, url)
def pushkey(self, namespace, key, old, new):
return self._repo.pushkey(namespace, key, old, new)
def listkeys(self, namespace):
return self._repo.listkeys(namespace)
def debugwireargs(self, one, two, three=None, four=None, five=None):
'''used to test argument passing over the wire'''
return "%s %s %s %s %s" % (one, two, three, four, five)
class locallegacypeer(localpeer):
'''peer extension which implements legacy methods too; used for tests with
restricted capabilities'''
def __init__(self, repo):
localpeer.__init__(self, repo, caps=LEGACYCAPS)
def branches(self, nodes):
return self._repo.branches(nodes)
def between(self, pairs):
return self._repo.between(pairs)
def changegroup(self, basenodes, source):
return self._repo.changegroup(basenodes, source)
def changegroupsubset(self, bases, heads, source):
return self._repo.changegroupsubset(bases, heads, source)
class localrepository(object):
supportedformats = set(('revlogv1', 'generaldelta'))
supported = supportedformats | set(('store', 'fncache', 'shared',
'dotencode'))
openerreqs = set(('revlogv1', 'generaldelta'))
requirements = ['revlogv1']
filtername = None
def _baserequirements(self, create):
return self.requirements[:]
def __init__(self, baseui, path=None, create=False):
self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
self.wopener = self.wvfs
self.root = self.wvfs.base
self.path = self.wvfs.join(".hg")
self.origroot = path
self.auditor = scmutil.pathauditor(self.root, self._checknested)
self.vfs = scmutil.vfs(self.path)
self.opener = self.vfs
self.baseui = baseui
self.ui = baseui.copy()
# A list of callbacks to shape the phase if no data were found.
# Callbacks are in the form: func(repo, roots) --> processed root.
# This list is to be filled by extensions during repo setup
self._phasedefaults = []
try:
self.ui.readconfig(self.join("hgrc"), self.root)
extensions.loadall(self.ui)
except IOError:
pass
if not self.vfs.isdir():
if create:
if not self.wvfs.exists():
self.wvfs.makedirs()
self.vfs.makedir(notindexed=True)
requirements = self._baserequirements(create)
if self.ui.configbool('format', 'usestore', True):
self.vfs.mkdir("store")
requirements.append("store")
if self.ui.configbool('format', 'usefncache', True):
requirements.append("fncache")
if self.ui.configbool('format', 'dotencode', True):
requirements.append('dotencode')
# create an invalid changelog
self.vfs.append(
"00changelog.i",
'\0\0\0\2' # represents revlogv2
' dummy changelog to prevent using the old repo layout'
)
if self.ui.configbool('format', 'generaldelta', False):
requirements.append("generaldelta")
requirements = set(requirements)
else:
raise error.RepoError(_("repository %s not found") % path)
elif create:
raise error.RepoError(_("repository %s already exists") % path)
else:
try:
requirements = scmutil.readrequires(self.vfs, self.supported)
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
requirements = set()
self.sharedpath = self.path
try:
vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
realpath=True)
s = vfs.base
if not vfs.exists():
raise error.RepoError(
_('.hg/sharedpath points to nonexistent directory %s') % s)
self.sharedpath = s
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
self.spath = self.store.path
self.svfs = self.store.vfs
self.sopener = self.svfs
self.sjoin = self.store.join
self.vfs.createmode = self.store.createmode
self._applyrequirements(requirements)
if create:
self._writerequirements()
self._branchcaches = {}
self.filterpats = {}
self._datafilters = {}
self._transref = self._lockref = self._wlockref = None
# A cache for various files under .hg/ that tracks file changes,
# (used by the filecache decorator)
#
# Maps a property name to its util.filecacheentry
self._filecache = {}
# hold sets of revision to be filtered
# should be cleared when something might have changed the filter value:
# - new changesets,
# - phase change,
# - new obsolescence marker,
# - working directory parent change,
# - bookmark changes
self.filteredrevcache = {}
def close(self):
pass
def _restrictcapabilities(self, caps):
return caps
def _applyrequirements(self, requirements):
self.requirements = requirements
self.sopener.options = dict((r, 1) for r in requirements
if r in self.openerreqs)
def _writerequirements(self):
reqfile = self.opener("requires", "w")
for r in sorted(self.requirements):
reqfile.write("%s\n" % r)
reqfile.close()
def _checknested(self, path):
"""Determine if path is a legal nested repository."""
if not path.startswith(self.root):
return False
subpath = path[len(self.root) + 1:]
normsubpath = util.pconvert(subpath)
# XXX: Checking against the current working copy is wrong in
# the sense that it can reject things like
#
# $ hg cat -r 10 sub/x.txt
#
# if sub/ is no longer a subrepository in the working copy
# parent revision.
#
# However, it can of course also allow things that would have
# been rejected before, such as the above cat command if sub/
# is a subrepository now, but was a normal directory before.
# The old path auditor would have rejected by mistake since it
# panics when it sees sub/.hg/.
#
# All in all, checking against the working copy seems sensible
# since we want to prevent access to nested repositories on
# the filesystem *now*.
ctx = self[None]
parts = util.splitpath(subpath)
while parts:
prefix = '/'.join(parts)
if prefix in ctx.substate:
if prefix == normsubpath:
return True
else:
sub = ctx.sub(prefix)
return sub.checknested(subpath[len(prefix) + 1:])
else:
parts.pop()
return False
def peer(self):
return localpeer(self) # not cached to avoid reference cycle
def unfiltered(self):
"""Return unfiltered version of the repository
Intended to be overwritten by filtered repo."""
return self
def filtered(self, name):
"""Return a filtered version of a repository"""
# build a new class with the mixin and the current class
# (possibly subclass of the repo)
class proxycls(repoview.repoview, self.unfiltered().__class__):
pass
return proxycls(self, name)
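# Illustrative usage (a sketch, not part of the original module): filter
# names such as 'visible' or 'served' are those registered with repoview;
# each call returns a proxy that hides the revisions excluded by that
# filter while delegating everything else to this repository.
#
#   served = repo.filtered('served')
#   visible = repo.filtered('visible')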
@repofilecache('bookmarks')
def _bookmarks(self):
return bookmarks.bmstore(self)
@repofilecache('bookmarks.current')
def _bookmarkcurrent(self):
return bookmarks.readcurrent(self)
def bookmarkheads(self, bookmark):
name = bookmark.split('@', 1)[0]
heads = []
for mark, n in self._bookmarks.iteritems():
if mark.split('@', 1)[0] == name:
heads.append(n)
return heads
@storecache('phaseroots')
def _phasecache(self):
return phases.phasecache(self, self._phasedefaults)
@storecache('obsstore')
def obsstore(self):
store = obsolete.obsstore(self.sopener)
if store and not obsolete._enabled:
# message is rare enough to not be translated
msg = 'obsolete feature not enabled but %i markers found!\n'
self.ui.warn(msg % len(list(store)))
return store
@storecache('00changelog.i')
def changelog(self):
c = changelog.changelog(self.sopener)
if 'HG_PENDING' in os.environ:
p = os.environ['HG_PENDING']
if p.startswith(self.root):
c.readpending('00changelog.i.a')
return c
@storecache('00manifest.i')
def manifest(self):
return manifest.manifest(self.sopener)
@repofilecache('dirstate')
def dirstate(self):
warned = [0]
def validate(node):
try:
self.changelog.rev(node)
return node
except error.LookupError:
if not warned[0]:
warned[0] = True
self.ui.warn(_("warning: ignoring unknown"
" working parent %s!\n") % short(node))
return nullid
return dirstate.dirstate(self.opener, self.ui, self.root, validate)
def __getitem__(self, changeid):
if changeid is None:
return context.workingctx(self)
return context.changectx(self, changeid)
def __contains__(self, changeid):
try:
return bool(self.lookup(changeid))
except error.RepoLookupError:
return False
def __nonzero__(self):
return True
def __len__(self):
return len(self.changelog)
def __iter__(self):
return iter(self.changelog)
def revs(self, expr, *args):
'''Return a list of revisions matching the given revset'''
expr = revset.formatspec(expr, *args)
m = revset.match(None, expr)
return [r for r in m(self, list(self))]
def set(self, expr, *args):
'''
Yield a context for each matching revision, after doing arg
replacement via revset.formatspec
'''
for r in self.revs(expr, *args):
yield self[r]
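# Illustrative usage of revs()/set() (a sketch; the revset string and the
# user name are made up for the example):
#
#   for r in repo.revs('ancestors(%s) and user(%s)', 'tip', 'alice'):
#       repo.ui.write('%d\n' % r)
#   for ctx in repo.set('heads(all())'):
#       repo.ui.write('%s\n' % ctx)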
def url(self):
return 'file:' + self.root
def hook(self, name, throw=False, **args):
return hook.hook(self.ui, self, name, throw, **args)
@unfilteredmethod
def _tag(self, names, node, message, local, user, date, extra={}):
if isinstance(names, str):
names = (names,)
branches = self.branchmap()
for name in names:
self.hook('pretag', throw=True, node=hex(node), tag=name,
local=local)
if name in branches:
self.ui.warn(_("warning: tag %s conflicts with existing"
" branch name\n") % name)
def writetags(fp, names, munge, prevtags):
fp.seek(0, 2)
if prevtags and prevtags[-1] != '\n':
fp.write('\n')
for name in names:
m = munge and munge(name) or name
if (self._tagscache.tagtypes and
name in self._tagscache.tagtypes):
old = self.tags().get(name, nullid)
fp.write('%s %s\n' % (hex(old), m))
fp.write('%s %s\n' % (hex(node), m))
fp.close()
prevtags = ''
if local:
try:
fp = self.opener('localtags', 'r+')
except IOError:
fp = self.opener('localtags', 'a')
else:
prevtags = fp.read()
# local tags are stored in the current charset
writetags(fp, names, None, prevtags)
for name in names:
self.hook('tag', node=hex(node), tag=name, local=local)
return
try:
fp = self.wfile('.hgtags', 'rb+')
except IOError, e:
if e.errno != errno.ENOENT:
raise
fp = self.wfile('.hgtags', 'ab')
else:
prevtags = fp.read()
# committed tags are stored in UTF-8
writetags(fp, names, encoding.fromlocal, prevtags)
fp.close()
self.invalidatecaches()
if '.hgtags' not in self.dirstate:
self[None].add(['.hgtags'])
m = matchmod.exact(self.root, '', ['.hgtags'])
tagnode = self.commit(message, user, date, extra=extra, match=m)
for name in names:
self.hook('tag', node=hex(node), tag=name, local=local)
return tagnode
def tag(self, names, node, message, local, user, date):
'''tag a revision with one or more symbolic names.
names is a list of strings or, when adding a single tag, names may be a
string.
if local is True, the tags are stored in a per-repository file.
otherwise, they are stored in the .hgtags file, and a new
changeset is committed with the change.
keyword arguments:
local: whether to store tags in non-version-controlled file
(default False)
message: commit message to use if committing
user: name of user to use if committing
date: date tuple to use if committing'''
if not local:
for x in self.status()[:5]:
if '.hgtags' in x:
raise util.Abort(_('working copy of .hgtags is changed '
'(please commit .hgtags manually)'))
self.tags() # instantiate the cache
self._tag(names, node, message, local, user, date)
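# Illustrative call (a sketch): tag the current tip globally, which
# commits a change to .hgtags; with local=True the tag would instead be
# written to .hg/localtags.
#
#   repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0', False,
#            'Alice <alice@example.com>', None)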
@filteredpropertycache
def _tagscache(self):
'''Returns a tagscache object that contains various tags related
caches.'''
# This simplifies its cache management by having one decorated
# function (this one) and the rest simply fetch things from it.
class tagscache(object):
def __init__(self):
# These two define the set of tags for this repository. tags
# maps tag name to node; tagtypes maps tag name to 'global' or
# 'local'. (Global tags are defined by .hgtags across all
# heads, and local tags are defined in .hg/localtags.)
# They constitute the in-memory cache of tags.
self.tags = self.tagtypes = None
self.nodetagscache = self.tagslist = None
cache = tagscache()
cache.tags, cache.tagtypes = self._findtags()
return cache
def tags(self):
'''return a mapping of tag to node'''
t = {}
if self.changelog.filteredrevs:
tags, tt = self._findtags()
else:
tags = self._tagscache.tags
for k, v in tags.iteritems():
try:
# ignore tags to unknown nodes
self.changelog.rev(v)
t[k] = v
except (error.LookupError, ValueError):
pass
return t
def _findtags(self):
'''Do the hard work of finding tags. Return a pair of dicts
(tags, tagtypes) where tags maps tag name to node, and tagtypes
maps tag name to a string like \'global\' or \'local\'.
Subclasses or extensions are free to add their own tags, but
should be aware that the returned dicts will be retained for the
duration of the localrepo object.'''
# XXX what tagtype should subclasses/extensions use? Currently
# mq and bookmarks add tags, but do not set the tagtype at all.
# Should each extension invent its own tag type? Should there
# be one tagtype for all such "virtual" tags? Or is the status
# quo fine?
alltags = {} # map tag name to (node, hist)
tagtypes = {}
tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
# Build the return dicts. Have to re-encode tag names because
# the tags module always uses UTF-8 (in order not to lose info
# writing to the cache), but the rest of Mercurial wants them in
# local encoding.
tags = {}
for (name, (node, hist)) in alltags.iteritems():
if node != nullid:
tags[encoding.tolocal(name)] = node
tags['tip'] = self.changelog.tip()
tagtypes = dict([(encoding.tolocal(name), value)
for (name, value) in tagtypes.iteritems()])
return (tags, tagtypes)
def tagtype(self, tagname):
'''
return the type of the given tag. result can be:
'local' : a local tag
'global' : a global tag
None : tag does not exist
'''
return self._tagscache.tagtypes.get(tagname)
def tagslist(self):
'''return a list of tags ordered by revision'''
if not self._tagscache.tagslist:
l = []
for t, n in self.tags().iteritems():
r = self.changelog.rev(n)
l.append((r, t, n))
self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
return self._tagscache.tagslist
def nodetags(self, node):
'''return the tags associated with a node'''
if not self._tagscache.nodetagscache:
nodetagscache = {}
for t, n in self._tagscache.tags.iteritems():
nodetagscache.setdefault(n, []).append(t)
for tags in nodetagscache.itervalues():
tags.sort()
self._tagscache.nodetagscache = nodetagscache
return self._tagscache.nodetagscache.get(node, [])
def nodebookmarks(self, node):
marks = []
for bookmark, n in self._bookmarks.iteritems():
if n == node:
marks.append(bookmark)
return sorted(marks)
def branchmap(self):
'''returns a dictionary {branch: [branchheads]}'''
branchmap.updatecache(self)
return self._branchcaches[self.filtername]
def _branchtip(self, heads):
'''return the tipmost branch head in heads'''
tip = heads[-1]
for h in reversed(heads):
if not self[h].closesbranch():
tip = h
break
return tip
def branchtip(self, branch):
'''return the tip node for a given branch'''
if branch not in self.branchmap():
raise error.RepoLookupError(_("unknown branch '%s'") % branch)
return self._branchtip(self.branchmap()[branch])
def branchtags(self):
'''return a dict where branch names map to the tipmost head of
the branch, open heads come before closed'''
bt = {}
for bn, heads in self.branchmap().iteritems():
bt[bn] = self._branchtip(heads)
return bt
def lookup(self, key):
return self[key].node()
def lookupbranch(self, key, remote=None):
repo = remote or self
if key in repo.branchmap():
return key
repo = (remote and remote.local()) and remote or self
return repo[key].branch()
def known(self, nodes):
nm = self.changelog.nodemap
pc = self._phasecache
result = []
for n in nodes:
r = nm.get(n)
resp = not (r is None or pc.phase(self, r) >= phases.secret)
result.append(resp)
return result
def local(self):
return self
def cancopy(self):
return self.local() # so statichttprepo's override of local() works
def join(self, f):
return os.path.join(self.path, f)
def wjoin(self, f):
return os.path.join(self.root, f)
def file(self, f):
if f[0] == '/':
f = f[1:]
return filelog.filelog(self.sopener, f)
def changectx(self, changeid):
return self[changeid]
def parents(self, changeid=None):
'''get list of changectxs for parents of changeid'''
return self[changeid].parents()
def setparents(self, p1, p2=nullid):
copies = self.dirstate.setparents(p1, p2)
pctx = self[p1]
if copies:
# Adjust copy records; the dirstate cannot do it, since it
# requires access to the parents' manifests. Preserve them
# only for entries added to the first parent.
for f in copies:
if f not in pctx and copies[f] in pctx:
self.dirstate.copy(copies[f], f)
if p2 == nullid:
for f, s in sorted(self.dirstate.copies().items()):
if f not in pctx and s not in pctx:
self.dirstate.copy(None, f)
def filectx(self, path, changeid=None, fileid=None):
"""changeid can be a changeset revision, node, or tag.
fileid can be a file revision or node."""
return context.filectx(self, path, changeid, fileid)
def getcwd(self):
return self.dirstate.getcwd()
def pathto(self, f, cwd=None):
return self.dirstate.pathto(f, cwd)
def wfile(self, f, mode='r'):
return self.wopener(f, mode)
def _link(self, f):
return self.wvfs.islink(f)
def _loadfilter(self, filter):
if filter not in self.filterpats:
l = []
for pat, cmd in self.ui.configitems(filter):
if cmd == '!':
continue
mf = matchmod.match(self.root, '', [pat])
fn = None
params = cmd
for name, filterfn in self._datafilters.iteritems():
if cmd.startswith(name):
fn = filterfn
params = cmd[len(name):].lstrip()
break
if not fn:
fn = lambda s, c, **kwargs: util.filter(s, c)
# Wrap old filters not supporting keyword arguments
if not inspect.getargspec(fn)[2]:
oldfn = fn
fn = lambda s, c, **kwargs: oldfn(s, c)
l.append((mf, fn, params))
self.filterpats[filter] = l
return self.filterpats[filter]
def _filter(self, filterpats, filename, data):
for mf, fn, cmd in filterpats:
if mf(filename):
self.ui.debug("filtering %s through %s\n" % (filename, cmd))
data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
break
return data
@unfilteredpropertycache
def _encodefilterpats(self):
return self._loadfilter('encode')
@unfilteredpropertycache
def _decodefilterpats(self):
return self._loadfilter('decode')
def adddatafilter(self, name, filter):
self._datafilters[name] = filter
def wread(self, filename):
if self._link(filename):
data = self.wvfs.readlink(filename)
else:
data = self.wopener.read(filename)
return self._filter(self._encodefilterpats, filename, data)
def wwrite(self, filename, data, flags):
data = self._filter(self._decodefilterpats, filename, data)
if 'l' in flags:
self.wopener.symlink(data, filename)
else:
self.wopener.write(filename, data)
if 'x' in flags:
self.wvfs.setflags(filename, False, True)
def wwritedata(self, filename, data):
return self._filter(self._decodefilterpats, filename, data)
def transaction(self, desc):
tr = self._transref and self._transref() or None
if tr and tr.running():
return tr.nest()
# abort here if the journal already exists
if self.svfs.exists("journal"):
raise error.RepoError(
_("abandoned transaction found - run hg recover"))
self._writejournal(desc)
renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
tr = transaction.transaction(self.ui.warn, self.sopener,
self.sjoin("journal"),
aftertrans(renames),
self.store.createmode)
self._transref = weakref.ref(tr)
return tr
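# Typical calling pattern (a sketch mirroring how callers elsewhere in
# this module use the method): close() commits the journal, release()
# rolls back if close() was never reached.
#
#   lock = repo.lock()
#   try:
#       tr = repo.transaction('my-operation')
#       try:
#           ...  # write store data through transaction-aware code
#           tr.close()
#       finally:
#           tr.release()
#   finally:
#       lock.release()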
def _journalfiles(self):
return ((self.svfs, 'journal'),
(self.vfs, 'journal.dirstate'),
(self.vfs, 'journal.branch'),
(self.vfs, 'journal.desc'),
(self.vfs, 'journal.bookmarks'),
(self.svfs, 'journal.phaseroots'))
def undofiles(self):
return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
def _writejournal(self, desc):
self.opener.write("journal.dirstate",
self.opener.tryread("dirstate"))
self.opener.write("journal.branch",
encoding.fromlocal(self.dirstate.branch()))
self.opener.write("journal.desc",
"%d\n%s\n" % (len(self), desc))
self.opener.write("journal.bookmarks",
self.opener.tryread("bookmarks"))
self.sopener.write("journal.phaseroots",
self.sopener.tryread("phaseroots"))
def recover(self):
lock = self.lock()
try:
if self.svfs.exists("journal"):
self.ui.status(_("rolling back interrupted transaction\n"))
transaction.rollback(self.sopener, self.sjoin("journal"),
self.ui.warn)
self.invalidate()
return True
else:
self.ui.warn(_("no interrupted transaction available\n"))
return False
finally:
lock.release()
def rollback(self, dryrun=False, force=False):
wlock = lock = None
try:
wlock = self.wlock()
lock = self.lock()
if self.svfs.exists("undo"):
return self._rollback(dryrun, force)
else:
self.ui.warn(_("no rollback information available\n"))
return 1
finally:
release(lock, wlock)
@unfilteredmethod # Until we get smarter cache management
def _rollback(self, dryrun, force):
ui = self.ui
try:
args = self.opener.read('undo.desc').splitlines()
(oldlen, desc, detail) = (int(args[0]), args[1], None)
if len(args) >= 3:
detail = args[2]
oldtip = oldlen - 1
if detail and ui.verbose:
msg = (_('repository tip rolled back to revision %s'
' (undo %s: %s)\n')
% (oldtip, desc, detail))
else:
msg = (_('repository tip rolled back to revision %s'
' (undo %s)\n')
% (oldtip, desc))
except IOError:
msg = _('rolling back unknown transaction\n')
desc = None
if not force and self['.'] != self['tip'] and desc == 'commit':
raise util.Abort(
_('rollback of last commit while not checked out '
'may lose data'), hint=_('use -f to force'))
ui.status(msg)
if dryrun:
return 0
parents = self.dirstate.parents()
self.destroying()
transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
if self.vfs.exists('undo.bookmarks'):
self.vfs.rename('undo.bookmarks', 'bookmarks')
if self.svfs.exists('undo.phaseroots'):
self.svfs.rename('undo.phaseroots', 'phaseroots')
self.invalidate()
parentgone = (parents[0] not in self.changelog.nodemap or
parents[1] not in self.changelog.nodemap)
if parentgone:
self.vfs.rename('undo.dirstate', 'dirstate')
try:
branch = self.opener.read('undo.branch')
self.dirstate.setbranch(encoding.tolocal(branch))
except IOError:
ui.warn(_('named branch could not be reset: '
'current branch is still \'%s\'\n')
% self.dirstate.branch())
self.dirstate.invalidate()
parents = tuple([p.rev() for p in self.parents()])
if len(parents) > 1:
ui.status(_('working directory now based on '
'revisions %d and %d\n') % parents)
else:
ui.status(_('working directory now based on '
'revision %d\n') % parents)
# TODO: if we know which new heads may result from this rollback, pass
# them to destroy(), which will prevent the branchhead cache from being
# invalidated.
self.destroyed()
return 0
def invalidatecaches(self):
if '_tagscache' in vars(self):
# can't use delattr on proxy
del self.__dict__['_tagscache']
self.unfiltered()._branchcaches.clear()
self.invalidatevolatilesets()
def invalidatevolatilesets(self):
self.filteredrevcache.clear()
obsolete.clearobscaches(self)
def invalidatedirstate(self):
'''Invalidates the dirstate, causing the next call to dirstate
to check if it was modified since the last time it was read,
rereading it if it has.
This differs from dirstate.invalidate() in that it doesn't always
reread the dirstate. Use dirstate.invalidate() if you want to
explicitly read the dirstate again (i.e. restoring it to a previous
known good state).'''
if hasunfilteredcache(self, 'dirstate'):
for k in self.dirstate._filecache:
try:
delattr(self.dirstate, k)
except AttributeError:
pass
delattr(self.unfiltered(), 'dirstate')
def invalidate(self):
unfiltered = self.unfiltered() # all file caches are stored unfiltered
for k in self._filecache:
# dirstate is invalidated separately in invalidatedirstate()
if k == 'dirstate':
continue
try:
delattr(unfiltered, k)
except AttributeError:
pass
self.invalidatecaches()
def _lock(self, lockname, wait, releasefn, acquirefn, desc):
try:
l = lock.lock(lockname, 0, releasefn, desc=desc)
except error.LockHeld, inst:
if not wait:
raise
self.ui.warn(_("waiting for lock on %s held by %r\n") %
(desc, inst.locker))
# default to 600 seconds timeout
l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
releasefn, desc=desc)
if acquirefn:
acquirefn()
return l
def _afterlock(self, callback):
"""add a callback to the current repository lock.
The callback will be executed on lock release."""
l = self._lockref and self._lockref()
if l:
l.postrelease.append(callback)
else:
callback()
def lock(self, wait=True):
'''Lock the repository store (.hg/store) and return a weak reference
to the lock. Use this before modifying the store (e.g. committing or
stripping). If you are opening a transaction, get a lock as well.'''
l = self._lockref and self._lockref()
if l is not None and l.held:
l.lock()
return l
def unlock():
self.store.write()
if hasunfilteredcache(self, '_phasecache'):
self._phasecache.write()
for k, ce in self._filecache.items():
if k == 'dirstate' or k not in self.__dict__:
continue
ce.refresh()
l = self._lock(self.sjoin("lock"), wait, unlock,
self.invalidate, _('repository %s') % self.origroot)
self._lockref = weakref.ref(l)
return l
def wlock(self, wait=True):
'''Lock the non-store parts of the repository (everything under
.hg except .hg/store) and return a weak reference to the lock.
Use this before modifying files in .hg.'''
l = self._wlockref and self._wlockref()
if l is not None and l.held:
l.lock()
return l
def unlock():
self.dirstate.write()
self._filecache['dirstate'].refresh()
l = self._lock(self.join("wlock"), wait, unlock,
self.invalidatedirstate, _('working directory of %s') %
self.origroot)
self._wlockref = weakref.ref(l)
return l
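# Typical locking order (a sketch): take the working-directory lock
# before the store lock, and release in reverse order, as rollback()
# above does.
#
#   wlock = repo.wlock()
#   try:
#       lock = repo.lock()
#       try:
#           ...  # modify store and working-directory state
#       finally:
#           lock.release()
#   finally:
#       wlock.release()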
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
"""
commit an individual file as part of a larger transaction
"""
fname = fctx.path()
text = fctx.data()
flog = self.file(fname)
fparent1 = manifest1.get(fname, nullid)
fparent2 = fparent2o = manifest2.get(fname, nullid)
meta = {}
copy = fctx.renamed()
if copy and copy[0] != fname:
# Mark the new revision of this file as a copy of another
# file. This copy data will effectively act as a parent
# of this new revision. If this is a merge, the first
# parent will be the nullid (meaning "look up the copy data")
# and the second one will be the other parent. For example:
#
# 0 --- 1 --- 3 rev1 changes file foo
# \ / rev2 renames foo to bar and changes it
# \- 2 -/ rev3 should have bar with all changes and
# should record that bar descends from
# bar in rev2 and foo in rev1
#
# this allows this merge to succeed:
#
# 0 --- 1 --- 3 rev4 reverts the content change from rev2
# \ / merging rev3 and rev4 should use bar@rev2
# \- 2 --- 4 as the merge base
#
cfname = copy[0]
crev = manifest1.get(cfname)
newfparent = fparent2
if manifest2: # branch merge
if fparent2 == nullid or crev is None: # copied on remote side
if cfname in manifest2:
crev = manifest2[cfname]
newfparent = fparent1
# find source in nearest ancestor if we've lost track
if not crev:
self.ui.debug(" %s: searching for copy revision for %s\n" %
(fname, cfname))
for ancestor in self[None].ancestors():
if cfname in ancestor:
crev = ancestor[cfname].filenode()
break
if crev:
self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
meta["copy"] = cfname
meta["copyrev"] = hex(crev)
fparent1, fparent2 = nullid, newfparent
else:
self.ui.warn(_("warning: can't find ancestor for '%s' "
"copied from '%s'!\n") % (fname, cfname))
elif fparent2 != nullid:
# is one parent an ancestor of the other?
fparentancestor = flog.ancestor(fparent1, fparent2)
if fparentancestor == fparent1:
fparent1, fparent2 = fparent2, nullid
elif fparentancestor == fparent2:
fparent2 = nullid
# is the file changed?
if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
changelist.append(fname)
return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
# are just the flags changed during merge?
if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
changelist.append(fname)
return fparent1
@unfilteredmethod
def commit(self, text="", user=None, date=None, match=None, force=False,
editor=False, extra={}):
"""Add a new revision to current repository.
Revision information is gathered from the working directory,
match can be used to filter the committed files. If editor is
supplied, it is called to get a commit message.
"""
def fail(f, msg):
raise util.Abort('%s: %s' % (f, msg))
if not match:
match = matchmod.always(self.root, '')
if not force:
vdirs = []
match.dir = vdirs.append
match.bad = fail
wlock = self.wlock()
try:
wctx = self[None]
merge = len(wctx.parents()) > 1
if (not force and merge and match and
(match.files() or match.anypats())):
raise util.Abort(_('cannot partially commit a merge '
'(do not specify files or patterns)'))
changes = self.status(match=match, clean=force)
if force:
changes[0].extend(changes[6]) # mq may commit unchanged files
# check subrepos
subs = []
commitsubs = set()
newstate = wctx.substate.copy()
# only manage subrepos and .hgsubstate if .hgsub is present
if '.hgsub' in wctx:
# we'll decide whether to track this ourselves, thanks
if '.hgsubstate' in changes[0]:
changes[0].remove('.hgsubstate')
if '.hgsubstate' in changes[2]:
changes[2].remove('.hgsubstate')
# compare current state to last committed state
# build new substate based on last committed state
oldstate = wctx.p1().substate
for s in sorted(newstate.keys()):
if not match(s):
# ignore working copy, use old state if present
if s in oldstate:
newstate[s] = oldstate[s]
continue
if not force:
raise util.Abort(
_("commit with new subrepo %s excluded") % s)
if wctx.sub(s).dirty(True):
if not self.ui.configbool('ui', 'commitsubrepos'):
raise util.Abort(
_("uncommitted changes in subrepo %s") % s,
hint=_("use --subrepos for recursive commit"))
subs.append(s)
commitsubs.add(s)
else:
bs = wctx.sub(s).basestate()
newstate[s] = (newstate[s][0], bs, newstate[s][2])
if oldstate.get(s, (None, None, None))[1] != bs:
subs.append(s)
# check for removed subrepos
for p in wctx.parents():
r = [s for s in p.substate if s not in newstate]
subs += [s for s in r if match(s)]
if subs:
if (not match('.hgsub') and
'.hgsub' in (wctx.modified() + wctx.added())):
raise util.Abort(
_("can't commit subrepos without .hgsub"))
changes[0].insert(0, '.hgsubstate')
elif '.hgsub' in changes[2]:
# clean up .hgsubstate when .hgsub is removed
if ('.hgsubstate' in wctx and
'.hgsubstate' not in changes[0] + changes[1] + changes[2]):
changes[2].insert(0, '.hgsubstate')
# make sure all explicit patterns are matched
if not force and match.files():
matched = set(changes[0] + changes[1] + changes[2])
for f in match.files():
f = self.dirstate.normalize(f)
if f == '.' or f in matched or f in wctx.substate:
continue
if f in changes[3]: # missing
fail(f, _('file not found!'))
if f in vdirs: # visited directory
d = f + '/'
for mf in matched:
if mf.startswith(d):
break
else:
fail(f, _("no match under directory!"))
elif f not in self.dirstate:
fail(f, _("file not tracked!"))
cctx = context.workingctx(self, text, user, date, extra, changes)
if (not force and not extra.get("close") and not merge
and not cctx.files()
and wctx.branch() == wctx.p1().branch()):
return None
if merge and cctx.deleted():
raise util.Abort(_("cannot commit merge with missing files"))
ms = mergemod.mergestate(self)
for f in changes[0]:
if f in ms and ms[f] == 'u':
raise util.Abort(_("unresolved merge conflicts "
"(see hg help resolve)"))
if editor:
cctx._text = editor(self, cctx, subs)
edited = (text != cctx._text)
# commit subs and write new state
if subs:
for s in sorted(commitsubs):
sub = wctx.sub(s)
self.ui.status(_('committing subrepository %s\n') %
subrepo.subrelpath(sub))
sr = sub.commit(cctx._text, user, date)
newstate[s] = (newstate[s][0], sr)
subrepo.writestate(self, newstate)
# Save commit message in case this transaction gets rolled back
# (e.g. by a pretxncommit hook). Leave the content alone on
# the assumption that the user will use the same editor again.
msgfn = self.savecommitmessage(cctx._text)
p1, p2 = self.dirstate.parents()
hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
try:
self.hook("precommit", throw=True, parent1=hookp1,
parent2=hookp2)
ret = self.commitctx(cctx, True)
except: # re-raises
if edited:
self.ui.write(
_('note: commit message saved in %s\n') % msgfn)
raise
# update bookmarks, dirstate and mergestate
bookmarks.update(self, [p1, p2], ret)
cctx.markcommitted(ret)
ms.reset()
finally:
wlock.release()
def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
self.hook("commit", node=node, parent1=parent1, parent2=parent2)
self._afterlock(commithook)
return ret
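# Illustrative usage (a sketch; assumes the files were already added to
# the dirstate, e.g. via repo[None].add(['foo'])):
#
#   node = repo.commit(text='fix a bug', user='Alice <alice@example.com>')
#   if node is None:
#       repo.ui.status('nothing changed\n')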
@unfilteredmethod
def commitctx(self, ctx, error=False):
"""Add a new revision to current repository.
Revision information is passed via the context argument.
"""
tr = lock = None
removed = list(ctx.removed())
p1, p2 = ctx.p1(), ctx.p2()
user = ctx.user()
lock = self.lock()
try:
tr = self.transaction("commit")
trp = weakref.proxy(tr)
if ctx.files():
m1 = p1.manifest().copy()
m2 = p2.manifest()
# check in files
new = {}
changed = []
linkrev = len(self)
for f in sorted(ctx.modified() + ctx.added()):
self.ui.note(f + "\n")
try:
fctx = ctx[f]
new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
changed)
m1.set(f, fctx.flags())
except OSError, inst:
self.ui.warn(_("trouble committing %s!\n") % f)
raise
except IOError, inst:
errcode = getattr(inst, 'errno', errno.ENOENT)
if error or errcode and errcode != errno.ENOENT:
self.ui.warn(_("trouble committing %s!\n") % f)
raise
else:
removed.append(f)
# update manifest
m1.update(new)
removed = [f for f in sorted(removed) if f in m1 or f in m2]
drop = [f for f in removed if f in m1]
for f in drop:
del m1[f]
mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
p2.manifestnode(), (new, drop))
files = changed + removed
else:
mn = p1.manifestnode()
files = []
# update changelog
self.changelog.delayupdate()
n = self.changelog.add(mn, files, ctx.description(),
trp, p1.node(), p2.node(),
user, ctx.date(), ctx.extra().copy())
p = lambda: self.changelog.writepending() and self.root or ""
xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
parent2=xp2, pending=p)
self.changelog.finalize(trp)
# set the new commit in its proper phase
targetphase = phases.newcommitphase(self.ui)
if targetphase:
# retracting the boundary does not alter parent changesets.
# if a parent has a higher phase, the resulting phase will
# be compliant anyway
#
# if the minimal phase was 0 we don't need to retract anything
phases.retractboundary(self, targetphase, [n])
tr.close()
branchmap.updatecache(self.filtered('served'))
return n
finally:
if tr:
tr.release()
lock.release()
@unfilteredmethod
def destroying(self):
'''Inform the repository that nodes are about to be destroyed.
Intended for use by strip and rollback, so there's a common
place for anything that has to be done before destroying history.
This is mostly useful for saving state that is in memory and waiting
to be flushed when the current lock is released. Because a call to
destroyed is imminent, the repo will be invalidated causing those
changes to stay in memory (waiting for the next unlock), or vanish
completely.
'''
# When using the same lock to commit and strip, the phasecache is left
# dirty after committing. Then when we strip, the repo is invalidated,
# causing those changes to disappear.
if '_phasecache' in vars(self):
self._phasecache.write()
@unfilteredmethod
def destroyed(self):
'''Inform the repository that nodes have been destroyed.
Intended for use by strip and rollback, so there's a common
place for anything that has to be done after destroying history.
'''
# When one tries to:
# 1) destroy nodes thus calling this method (e.g. strip)
# 2) use phasecache somewhere (e.g. commit)
#
# then 2) will fail because the phasecache contains nodes that were
# removed. We can either remove phasecache from the filecache,
# causing it to reload next time it is accessed, or simply filter
# the removed nodes now and write the updated cache.
self._phasecache.filterunknown(self)
self._phasecache.write()
# update the 'served' branch cache to help read-only server processes
# Thanks to branchcache collaboration this is done from the nearest
# filtered subset and it is expected to be fast.
branchmap.updatecache(self.filtered('served'))
# Ensure the persistent tag cache is updated. Doing it now
# means that the tag cache only has to worry about destroyed
# heads immediately after a strip/rollback. That in turn
# guarantees that "cachetip == currenttip" (comparing both rev
# and node) always means no nodes have been added or destroyed.
# XXX this is suboptimal when qrefresh'ing: we strip the current
# head, refresh the tag cache, then immediately add a new head.
# But I think doing it this way is necessary for the "instant
# tag cache retrieval" case to work.
self.invalidate()
def walk(self, match, node=None):
'''
walk recursively through the directory tree or a given
changeset, finding all files matched by the match
function
'''
return self[node].walk(match)
def status(self, node1='.', node2=None, match=None,
ignored=False, clean=False, unknown=False,
listsubrepos=False):
"""return status of files between two nodes or node and working
directory.
If node1 is None, use the first dirstate parent instead.
If node2 is None, compare node1 with working directory.
"""
def mfmatches(ctx):
mf = ctx.manifest().copy()
if match.always():
return mf
for fn in mf.keys():
if not match(fn):
del mf[fn]
return mf
if isinstance(node1, context.changectx):
ctx1 = node1
else:
ctx1 = self[node1]
if isinstance(node2, context.changectx):
ctx2 = node2
else:
ctx2 = self[node2]
working = ctx2.rev() is None
parentworking = working and ctx1 == self['.']
match = match or matchmod.always(self.root, self.getcwd())
listignored, listclean, listunknown = ignored, clean, unknown
# load earliest manifest first for caching reasons
if not working and ctx2.rev() < ctx1.rev():
ctx2.manifest()
if not parentworking:
def bad(f, msg):
# 'f' may be a directory pattern from 'match.files()',
# so 'f not in ctx1' is not enough
if f not in ctx1 and f not in ctx1.dirs():
self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
match.bad = bad
if working: # we need to scan the working dir
subrepos = []
if '.hgsub' in self.dirstate:
subrepos = sorted(ctx2.substate)
s = self.dirstate.status(match, subrepos, listignored,
listclean, listunknown)
cmp, modified, added, removed, deleted, unknown, ignored, clean = s
# check for any possibly clean files
if parentworking and cmp:
fixup = []
# do a full compare of any files that might have changed
for f in sorted(cmp):
if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
or ctx1[f].cmp(ctx2[f])):
modified.append(f)
else:
fixup.append(f)
# update dirstate for files that are actually clean
if fixup:
if listclean:
clean += fixup
try:
# updating the dirstate is optional
# so we don't wait on the lock
wlock = self.wlock(False)
try:
for f in fixup:
self.dirstate.normal(f)
finally:
wlock.release()
except error.LockError:
pass
if not parentworking:
mf1 = mfmatches(ctx1)
if working:
# we are comparing working dir against non-parent
# generate a pseudo-manifest for the working dir
mf2 = mfmatches(self['.'])
for f in cmp + modified + added:
mf2[f] = None
mf2.set(f, ctx2.flags(f))
for f in removed:
if f in mf2:
del mf2[f]
else:
# we are comparing two revisions
deleted, unknown, ignored = [], [], []
mf2 = mfmatches(ctx2)
modified, added, clean = [], [], []
withflags = mf1.withflags() | mf2.withflags()
for fn, mf2node in mf2.iteritems():
if fn in mf1:
if (fn not in deleted and
((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
(mf1[fn] != mf2node and
(mf2node or ctx1[fn].cmp(ctx2[fn]))))):
modified.append(fn)
elif listclean:
clean.append(fn)
del mf1[fn]
elif fn not in deleted:
added.append(fn)
removed = mf1.keys()
if working and modified and not self.dirstate._checklink:
# Symlink placeholders may get non-symlink-like contents
# via user error or dereferencing by NFS or Samba servers,
# so we filter out any placeholders that don't look like a
# symlink
sane = []
for f in modified:
if ctx2.flags(f) == 'l':
d = ctx2[f].data()
if len(d) >= 1024 or '\n' in d or util.binary(d):
self.ui.debug('ignoring suspect symlink placeholder'
' "%s"\n' % f)
continue
sane.append(f)
modified = sane
r = modified, added, removed, deleted, unknown, ignored, clean
if listsubrepos:
for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
if working:
rev2 = None
else:
rev2 = ctx2.substate[subpath][1]
try:
submatch = matchmod.narrowmatcher(subpath, match)
s = sub.status(rev2, match=submatch, ignored=listignored,
clean=listclean, unknown=listunknown,
listsubrepos=True)
for rfiles, sfiles in zip(r, s):
rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
except error.LookupError:
self.ui.status(_("skipping missing subrepository: %s\n")
% subpath)
for l in r:
l.sort()
return r
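# Illustrative usage (a sketch): the result is a 7-tuple of lists which
# callers usually unpack positionally.
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(unknown=True, ignored=True, clean=True)
#   for f in modified:
#       repo.ui.write('M %s\n' % f)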
def heads(self, start=None):
heads = self.changelog.heads(start)
# sort the output in rev descending order
return sorted(heads, key=self.changelog.rev, reverse=True)
def branchheads(self, branch=None, start=None, closed=False):
'''return a (possibly filtered) list of heads for the given branch
Heads are returned in topological order, from newest to oldest.
If branch is None, use the dirstate branch.
If start is not None, return only heads reachable from start.
If closed is True, return heads that are marked as closed as well.
'''
if branch is None:
branch = self[None].branch()
branches = self.branchmap()
if branch not in branches:
return []
# the cache returns heads ordered lowest to highest
bheads = list(reversed(branches[branch]))
if start is not None:
# filter out the heads that cannot be reached from startrev
fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
bheads = [h for h in bheads if h in fbheads]
if not closed:
bheads = [h for h in bheads if not self[h].closesbranch()]
return bheads
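# Illustrative usage (a sketch):
#
#   for h in repo.branchheads('default'):
#       repo.ui.write('%s\n' % hex(h))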
def branches(self, nodes):
if not nodes:
nodes = [self.changelog.tip()]
b = []
for n in nodes:
t = n
while True:
p = self.changelog.parents(n)
if p[1] != nullid or p[0] == nullid:
b.append((t, n, p[0], p[1]))
break
n = p[0]
return b
def between(self, pairs):
r = []
for top, bottom in pairs:
n, l, i = top, [], 0
f = 1
while n != bottom and n != nullid:
p = self.changelog.parents(n)[0]
if i == f:
l.append(n)
f = f * 2
n = p
i += 1
r.append(l)
return r
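# Sketch of the sampling above (illustrative only): walking the
# first-parent chain from top towards bottom, a node is recorded whenever
# the step counter i equals f, and f doubles after each hit, so the
# recorded nodes lie at distances 1, 2, 4, 8, ... from top. For a linear
# chain n10 -> n9 -> ... -> n0 with pairs=[(n10, n0)], the result would
# be [[n9, n8, n6, n2]].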
def pull(self, remote, heads=None, force=False):
# don't open a transaction for nothing or you break future useful
# rollback calls
tr = None
trname = 'pull\n' + util.hidepassword(remote.url())
lock = self.lock()
try:
tmp = discovery.findcommonincoming(self, remote, heads=heads,
force=force)
common, fetch, rheads = tmp
if not fetch:
self.ui.status(_("no changes found\n"))
added = []
result = 0
else:
tr = self.transaction(trname)
if heads is None and list(common) == [nullid]:
self.ui.status(_("requesting all changes\n"))
elif heads is None and remote.capable('changegroupsubset'):
# issue1320, avoid a race if remote changed after discovery
heads = rheads
if remote.capable('getbundle'):
cg = remote.getbundle('pull', common=common,
heads=heads or rheads)
elif heads is None:
cg = remote.changegroup(fetch, 'pull')
elif not remote.capable('changegroupsubset'):
raise util.Abort(_("partial pull cannot be done because "
"other repository doesn't support "
"changegroupsubset."))
else:
cg = remote.changegroupsubset(fetch, heads, 'pull')
# we use the unfiltered changelog here because hidden revisions must
# be taken into account for phase synchronization. They may
# become public and thus visible again.
cl = self.unfiltered().changelog
clstart = len(cl)
result = self.addchangegroup(cg, 'pull', remote.url())
clend = len(cl)
added = [cl.node(r) for r in xrange(clstart, clend)]
# compute target subset
if heads is None:
# We pulled everything possible
# sync on everything common
subset = common + added
else:
# We pulled a specific subset
# sync on this subset
subset = heads
# Get remote phases data from remote
remotephases = remote.listkeys('phases')
publishing = bool(remotephases.get('publishing', False))
if remotephases and not publishing:
# remote is new and non-publishing
pheads, _dr = phases.analyzeremotephases(self, subset,
remotephases)
phases.advanceboundary(self, phases.public, pheads)
phases.advanceboundary(self, phases.draft, subset)
else:
# Remote is old or publishing; all common changesets
# should be seen as public
phases.advanceboundary(self, phases.public, subset)
def gettransaction():
if tr is None:
return self.transaction(trname)
return tr
obstr = obsolete.syncpull(self, remote, gettransaction)
if obstr is not None:
tr = obstr
if tr is not None:
tr.close()
finally:
if tr is not None:
tr.release()
lock.release()
return result
def checkpush(self, force, revs):
"""Extensions can override this function if additional checks have
to be performed before pushing, or call it if they override push
command.
"""
pass
def push(self, remote, force=False, revs=None, newbranch=False):
'''Push outgoing changesets (limited by revs) from the current
repository to remote. Return an integer:
- None means nothing to push
- 0 means HTTP error
- 1 means we pushed and remote head count is unchanged *or*
we have outgoing changesets but refused to push
- other values as described by addchangegroup()
'''
# there are two ways to push to remote repo:
#
# addchangegroup assumes local user can lock remote
# repo (local filesystem, old ssh servers).
#
# unbundle assumes local user cannot lock remote repo (new ssh
# servers, http servers).
if not remote.canpush():
raise util.Abort(_("destination does not support push"))
unfi = self.unfiltered()
def localphasemove(nodes, phase=phases.public):
"""move <nodes> to <phase> in the local source repo"""
if locallock is not None:
phases.advanceboundary(self, phase, nodes)
else:
# repo is not locked, do not change any phases!
# Inform the user that phases should have been moved when
# applicable.
actualmoves = [n for n in nodes if phase < self[n].phase()]
phasestr = phases.phasenames[phase]
if actualmoves:
self.ui.status(_('cannot lock source repo, skipping local'
' %s phase update\n') % phasestr)
# get local lock as we might write phase data
locallock = None
try:
locallock = self.lock()
except IOError, err:
if err.errno != errno.EACCES:
raise
# source repo cannot be locked.
# We do not abort the push, but just disable the local phase
# synchronisation.
msg = 'cannot lock source repository: %s\n' % err
self.ui.debug(msg)
try:
self.checkpush(force, revs)
lock = None
unbundle = remote.capable('unbundle')
if not unbundle:
lock = remote.lock()
try:
# discovery
fci = discovery.findcommonincoming
commoninc = fci(unfi, remote, force=force)
common, inc, remoteheads = commoninc
fco = discovery.findcommonoutgoing
outgoing = fco(unfi, remote, onlyheads=revs,
commoninc=commoninc, force=force)
if not outgoing.missing:
# nothing to push
scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
ret = None
else:
# something to push
if not force:
# if self.obsstore is empty --> no obsolete markers,
# so we can skip the iteration
if unfi.obsstore:
# these messages are kept short for the 80-char limit
mso = _("push includes obsolete changeset: %s!")
mst = "push includes %s changeset: %s!"
# plain versions for i18n tool to detect them
_("push includes unstable changeset: %s!")
_("push includes bumped changeset: %s!")
_("push includes divergent changeset: %s!")
# If there is at least one obsolete or unstable
# changeset in missing, at least one of the missing
# heads will be obsolete or unstable. So checking
# heads only is ok
for node in outgoing.missingheads:
ctx = unfi[node]
if ctx.obsolete():
raise util.Abort(mso % ctx)
elif ctx.troubled():
raise util.Abort(_(mst)
% (ctx.troubles()[0],
ctx))
discovery.checkheads(unfi, remote, outgoing,
remoteheads, newbranch,
bool(inc))
# create a changegroup from local
if revs is None and not outgoing.excluded:
# push everything,
# use the fast path, no race possible on push
cg = self._changegroup(outgoing.missing, 'push')
else:
cg = self.getlocalbundle('push', outgoing)
# apply changegroup to remote
if unbundle:
# local repo finds heads on server, finds out what
# revs it must push. once revs transferred, if server
# finds it has different heads (someone else won
# commit/push race), server aborts.
if force:
remoteheads = ['force']
# ssh: return remote's addchangegroup()
# http: return remote's addchangegroup() or 0 for error
ret = remote.unbundle(cg, remoteheads, 'push')
else:
# we return an integer indicating remote head count
# change
ret = remote.addchangegroup(cg, 'push', self.url())
if ret:
# push succeed, synchronize target of the push
cheads = outgoing.missingheads
elif revs is None:
# All-out push failed; synchronize all common
cheads = outgoing.commonheads
else:
# I want cheads = heads(::missingheads and ::commonheads)
# (missingheads is revs with secret changeset filtered out)
#
# This can be expressed as:
# cheads = ( (missingheads and ::commonheads)
#          + (commonheads and ::missingheads) )
#
# while trying to push we already computed the following:
# common = (::commonheads)
# missing = ((commonheads::missingheads) - commonheads)
#
# We can pick:
# * missingheads part of common (::commonheads)
common = set(outgoing.common)
cheads = [node for node in revs if node in common]
# and
# * commonheads parents on missing
revset = unfi.set('%ln and parents(roots(%ln))',
outgoing.commonheads,
outgoing.missing)
cheads.extend(c.node() for c in revset)
# even when we don't push, exchanging phase data is useful
remotephases = remote.listkeys('phases')
if (self.ui.configbool('ui', '_usedassubrepo', False)
and remotephases # server supports phases
and ret is None # nothing was pushed
and remotephases.get('publishing', False)):
# When:
# - this is a subrepo push
# - and remote support phase
# - and no changeset was pushed
# - and remote is publishing
# We may be in issue 3871 case!
# We drop the possible courtesy phase synchronisation
# in order to publish changesets that may still be
# draft locally despite being public on the remote.
remotephases = {'publishing': 'True'}
if not remotephases: # old server or public only repo
localphasemove(cheads)
# don't push any phase data as there is nothing to push
else:
ana = phases.analyzeremotephases(self, cheads, remotephases)
pheads, droots = ana
### Apply remote phase on local
if remotephases.get('publishing', False):
localphasemove(cheads)
else: # publish = False
localphasemove(pheads)
localphasemove(cheads, phases.draft)
### Apply local phase on remote
# Get the list of all revs that are draft on remote but public here.
# XXX Beware that the revset breaks if droots are not strictly
# XXX roots; we may want to ensure they are, but that is costly
outdated = unfi.set('heads((%ln::%ln) and public())',
droots, cheads)
for newremotehead in outdated:
r = remote.pushkey('phases',
newremotehead.hex(),
str(phases.draft),
str(phases.public))
if not r:
self.ui.warn(_('updating %s to public failed!\n')
% newremotehead)
self.ui.debug('try to push obsolete markers to remote\n')
obsolete.syncpush(self, remote)
finally:
if lock is not None:
lock.release()
finally:
if locallock is not None:
locallock.release()
self.ui.debug("checking for updated bookmarks\n")
rb = remote.listkeys('bookmarks')
for k in rb.keys():
if k in unfi._bookmarks:
nr, nl = rb[k], hex(self._bookmarks[k])
if nr in unfi:
cr = unfi[nr]
cl = unfi[nl]
if bookmarks.validdest(unfi, cr, cl):
r = remote.pushkey('bookmarks', k, nr, nl)
if r:
self.ui.status(_("updating bookmark %s\n") % k)
else:
self.ui.warn(_('updating bookmark %s'
' failed!\n') % k)
return ret
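# Illustrative interpretation of the return value (a sketch; 'other'
# stands for a peer obtained elsewhere):
#
#   r = repo.push(other)
#   if r is None:
#       repo.ui.status('no changesets to push\n')
#   elif r == 0:
#       repo.ui.warn('push failed\n')
#   else:
#       repo.ui.status('pushed; head-count delta encoded as %r\n' % r)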
def changegroupinfo(self, nodes, source):
if self.ui.verbose or source == 'bundle':
self.ui.status(_("%d changesets found\n") % len(nodes))
if self.ui.debugflag:
self.ui.debug("list of changesets:\n")
for node in nodes:
self.ui.debug("%s\n" % hex(node))
def changegroupsubset(self, bases, heads, source):
"""Compute a changegroup consisting of all the nodes that are
descendants of any of the bases and ancestors of any of the heads.
Return a chunkbuffer object whose read() method will return
successive changegroup chunks.
It is fairly complex as determining which filenodes and which
manifest nodes need to be included for the changeset to be complete
is non-trivial.
Another wrinkle is doing the reverse, figuring out which changeset in
the changegroup a particular filenode or manifestnode belongs to.
"""
cl = self.changelog
if not bases:
bases = [nullid]
csets, bases, heads = cl.nodesbetween(bases, heads)
# We assume that all ancestors of bases are known
common = cl.ancestors([cl.rev(n) for n in bases])
return self._changegroupsubset(common, csets, heads, source)
def getlocalbundle(self, source, outgoing):
"""Like getbundle, but taking a discovery.outgoing as an argument.
This is only implemented for local repos and reuses potentially
precomputed sets in outgoing."""
if not outgoing.missing:
return None
return self._changegroupsubset(outgoing.common,
outgoing.missing,
outgoing.missingheads,
source)
def getbundle(self, source, heads=None, common=None):
"""Like changegroupsubset, but returns the set difference between the
ancestors of heads and the ancestors of common.
If heads is None, use the local heads. If common is None, use [nullid].
The nodes in common might not all be known locally due to the way the
current discovery protocol works.
"""
cl = self.changelog
if common:
hasnode = cl.hasnode
common = [n for n in common if hasnode(n)]
else:
common = [nullid]
if not heads:
heads = cl.heads()
return self.getlocalbundle(source,
discovery.outgoing(cl, common, heads))
@unfilteredmethod
def _changegroupsubset(self, commonrevs, csets, heads, source):
cl = self.changelog
mf = self.manifest
mfs = {} # needed manifests
fnodes = {} # needed file nodes
changedfiles = set()
fstate = ['', {}]
count = [0, 0]
# can we go through the fast path?
heads.sort()
if heads == sorted(self.heads()):
return self._changegroup(csets, source)
# slow path
self.hook('preoutgoing', throw=True, source=source)
self.changegroupinfo(csets, source)
# filter any nodes that claim to be part of the known set
def prune(revlog, missing):
rr, rl = revlog.rev, revlog.linkrev
return [n for n in missing
if rl(rr(n)) not in commonrevs]
progress = self.ui.progress
_bundling = _('bundling')
_changesets = _('changesets')
_manifests = _('manifests')
_files = _('files')
def lookup(revlog, x):
if revlog == cl:
c = cl.read(x)
changedfiles.update(c[3])
mfs.setdefault(c[0], x)
count[0] += 1
progress(_bundling, count[0],
unit=_changesets, total=count[1])
return x
elif revlog == mf:
clnode = mfs[x]
mdata = mf.readfast(x)
for f, n in mdata.iteritems():
if f in changedfiles:
fnodes[f].setdefault(n, clnode)
count[0] += 1
progress(_bundling, count[0],
unit=_manifests, total=count[1])
return clnode
else:
progress(_bundling, count[0], item=fstate[0],
unit=_files, total=count[1])
return fstate[1][x]
bundler = changegroup.bundle10(lookup)
reorder = self.ui.config('bundle', 'reorder', 'auto')
if reorder == 'auto':
reorder = None
else:
reorder = util.parsebool(reorder)
def gengroup():
# Create a changenode group generator that will call our functions
# back to lookup the owning changenode and collect information.
count[:] = [0, len(csets)]
for chunk in cl.group(csets, bundler, reorder=reorder):
yield chunk
progress(_bundling, None)
# Create a generator for the manifestnodes that calls our lookup
# and data collection functions back.
for f in changedfiles:
fnodes[f] = {}
count[:] = [0, len(mfs)]
for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
yield chunk
progress(_bundling, None)
mfs.clear()
# Go through all our files in order sorted by name.
count[:] = [0, len(changedfiles)]
for fname in sorted(changedfiles):
filerevlog = self.file(fname)
if not len(filerevlog):
raise util.Abort(_("empty or missing revlog for %s")
% fname)
fstate[0] = fname
fstate[1] = fnodes.pop(fname, {})
nodelist = prune(filerevlog, fstate[1])
if nodelist:
count[0] += 1
yield bundler.fileheader(fname)
for chunk in filerevlog.group(nodelist, bundler, reorder):
yield chunk
# Signal that no more groups are left.
yield bundler.close()
progress(_bundling, None)
if csets:
self.hook('outgoing', node=hex(csets[0]), source=source)
return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
def changegroup(self, basenodes, source):
# to avoid a race we use changegroupsubset() (issue1320)
return self.changegroupsubset(basenodes, self.heads(), source)
@unfilteredmethod
def _changegroup(self, nodes, source):
"""Compute the changegroup of all nodes that we have that a recipient
doesn't. Return a chunkbuffer object whose read() method will return
successive changegroup chunks.
This is much easier than the previous function as we can assume that
the recipient has any changenode we aren't sending them.
nodes is the set of nodes to send"""
cl = self.changelog
mf = self.manifest
mfs = {}
changedfiles = set()
fstate = ['']
count = [0, 0]
self.hook('preoutgoing', throw=True, source=source)
self.changegroupinfo(nodes, source)
revset = set([cl.rev(n) for n in nodes])
def gennodelst(log):
ln, llr = log.node, log.linkrev
return [ln(r) for r in log if llr(r) in revset]
progress = self.ui.progress
_bundling = _('bundling')
_changesets = _('changesets')
_manifests = _('manifests')
_files = _('files')
def lookup(revlog, x):
if revlog == cl:
c = cl.read(x)
changedfiles.update(c[3])
mfs.setdefault(c[0], x)
count[0] += 1
progress(_bundling, count[0],
unit=_changesets, total=count[1])
return x
elif revlog == mf:
count[0] += 1
progress(_bundling, count[0],
unit=_manifests, total=count[1])
return cl.node(revlog.linkrev(revlog.rev(x)))
else:
progress(_bundling, count[0], item=fstate[0],
total=count[1], unit=_files)
return cl.node(revlog.linkrev(revlog.rev(x)))
bundler = changegroup.bundle10(lookup)
reorder = self.ui.config('bundle', 'reorder', 'auto')
if reorder == 'auto':
reorder = None
else:
reorder = util.parsebool(reorder)
def gengroup():
'''yield a sequence of changegroup chunks (strings)'''
# construct a list of all changed files
count[:] = [0, len(nodes)]
for chunk in cl.group(nodes, bundler, reorder=reorder):
yield chunk
progress(_bundling, None)
count[:] = [0, len(mfs)]
for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
yield chunk
progress(_bundling, None)
count[:] = [0, len(changedfiles)]
for fname in sorted(changedfiles):
filerevlog = self.file(fname)
if not len(filerevlog):
raise util.Abort(_("empty or missing revlog for %s")
% fname)
fstate[0] = fname
nodelist = gennodelst(filerevlog)
if nodelist:
count[0] += 1
yield bundler.fileheader(fname)
for chunk in filerevlog.group(nodelist, bundler, reorder):
yield chunk
yield bundler.close()
progress(_bundling, None)
if nodes:
self.hook('outgoing', node=hex(nodes[0]), source=source)
return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
@unfilteredmethod
def addchangegroup(self, source, srctype, url, emptyok=False):
"""Add the changegroup returned by source.read() to this repo.
srctype is a string like 'push', 'pull', or 'unbundle'. url is
the URL of the repo where this changegroup is coming from.
Return an integer summarizing the change to this repo:
- nothing changed or no source: 0
- more heads than before: 1+added heads (2..n)
- fewer heads than before: -1-removed heads (-2..-n)
- number of heads stays the same: 1
"""
def csmap(x):
self.ui.debug("add changeset %s\n" % short(x))
return len(cl)
def revmap(x):
return cl.rev(x)
if not source:
return 0
self.hook('prechangegroup', throw=True, source=srctype, url=url)
changesets = files = revisions = 0
efiles = set()
# write changelog data to temp files so concurrent readers will not see
# an inconsistent view
cl = self.changelog
cl.delayupdate()
oldheads = cl.heads()
tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
try:
trp = weakref.proxy(tr)
# pull off the changeset group
self.ui.status(_("adding changesets\n"))
clstart = len(cl)
class prog(object):
step = _('changesets')
count = 1
ui = self.ui
total = None
def __call__(self):
self.ui.progress(self.step, self.count, unit=_('chunks'),
total=self.total)
self.count += 1
pr = prog()
source.callback = pr
source.changelogheader()
srccontent = cl.addgroup(source, csmap, trp)
if not (srccontent or emptyok):
raise util.Abort(_("received changelog group is empty"))
clend = len(cl)
changesets = clend - clstart
for c in xrange(clstart, clend):
efiles.update(self[c].files())
efiles = len(efiles)
self.ui.progress(_('changesets'), None)
# pull off the manifest group
self.ui.status(_("adding manifests\n"))
pr.step = _('manifests')
pr.count = 1
pr.total = changesets # manifests <= changesets
# no need to check for empty manifest group here:
# if the result of the merge of 1 and 2 is the same in 3 and 4,
# no new manifest will be created and the manifest group will
# be empty during the pull
source.manifestheader()
self.manifest.addgroup(source, revmap, trp)
self.ui.progress(_('manifests'), None)
needfiles = {}
if self.ui.configbool('server', 'validate', default=False):
# validate incoming csets have their manifests
for cset in xrange(clstart, clend):
mfest = self.changelog.read(self.changelog.node(cset))[0]
mfest = self.manifest.readdelta(mfest)
# store file nodes we must see
for f, n in mfest.iteritems():
needfiles.setdefault(f, set()).add(n)
# process the files
self.ui.status(_("adding file changes\n"))
pr.step = _('files')
pr.count = 1
pr.total = efiles
source.callback = None
while True:
chunkdata = source.filelogheader()
if not chunkdata:
break
f = chunkdata["filename"]
self.ui.debug("adding %s revisions\n" % f)
pr()
fl = self.file(f)
o = len(fl)
if not fl.addgroup(source, revmap, trp):
raise util.Abort(_("received file revlog group is empty"))
revisions += len(fl) - o
files += 1
if f in needfiles:
needs = needfiles[f]
for new in xrange(o, len(fl)):
n = fl.node(new)
if n in needs:
needs.remove(n)
else:
raise util.Abort(
_("received spurious file revlog entry"))
if not needs:
del needfiles[f]
self.ui.progress(_('files'), None)
for f, needs in needfiles.iteritems():
fl = self.file(f)
for n in needs:
try:
fl.rev(n)
except error.LookupError:
raise util.Abort(
_('missing file data for %s:%s - run hg verify') %
(f, hex(n)))
dh = 0
if oldheads:
heads = cl.heads()
dh = len(heads) - len(oldheads)
for h in heads:
if h not in oldheads and self[h].closesbranch():
dh -= 1
htext = ""
if dh:
htext = _(" (%+d heads)") % dh
self.ui.status(_("added %d changesets"
" with %d changes to %d files%s\n")
% (changesets, revisions, files, htext))
self.invalidatevolatilesets()
if changesets > 0:
p = lambda: cl.writepending() and self.root or ""
self.hook('pretxnchangegroup', throw=True,
node=hex(cl.node(clstart)), source=srctype,
url=url, pending=p)
added = [cl.node(r) for r in xrange(clstart, clend)]
publishing = self.ui.configbool('phases', 'publish', True)
if srctype == 'push':
# Old servers can not push the boundary themselves.
# New servers won't push the boundary if the changeset already
# existed locally as secret
#
# We should not use added here but the list of all changes in
# the bundle
if publishing:
phases.advanceboundary(self, phases.public, srccontent)
else:
phases.advanceboundary(self, phases.draft, srccontent)
phases.retractboundary(self, phases.draft, added)
elif srctype != 'strip':
# publishing only alters behavior during push
#
# strip should not touch boundary at all
phases.retractboundary(self, phases.draft, added)
# make changelog see real files again
cl.finalize(trp)
tr.close()
if changesets > 0:
if srctype != 'strip':
# During strip, branchcache is invalid but the coming call to
# `destroyed` will repair it.
# In other cases we can safely update the cache on disk.
branchmap.updatecache(self.filtered('served'))
def runhooks():
# forcefully update the on-disk branch cache
self.ui.debug("updating the branch cache\n")
self.hook("changegroup", node=hex(cl.node(clstart)),
source=srctype, url=url)
for n in added:
self.hook("incoming", node=hex(n), source=srctype,
url=url)
newheads = [h for h in self.heads() if h not in oldheads]
self.ui.log("incoming",
"%s incoming changes - new heads: %s\n",
len(added),
', '.join([hex(c[:6]) for c in newheads]))
self._afterlock(runhooks)
finally:
tr.release()
# never return 0 here:
if dh < 0:
return dh - 1
else:
return dh + 1
def stream_in(self, remote, requirements):
lock = self.lock()
try:
# Save remote branchmap. We will use it later
# to speed up branchcache creation
rbranchmap = None
if remote.capable("branchmap"):
rbranchmap = remote.branchmap()
fp = remote.stream_out()
l = fp.readline()
try:
resp = int(l)
except ValueError:
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if resp == 1:
raise util.Abort(_('operation forbidden by server'))
elif resp == 2:
raise util.Abort(_('locking the remote repository failed'))
elif resp != 0:
raise util.Abort(_('the server sent an unknown error code'))
self.ui.status(_('streaming all changes\n'))
l = fp.readline()
try:
total_files, total_bytes = map(int, l.split(' ', 1))
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
self.ui.status(_('%d files to transfer, %s of data\n') %
(total_files, util.bytecount(total_bytes)))
handled_bytes = 0
self.ui.progress(_('clone'), 0, total=total_bytes)
start = time.time()
for i in xrange(total_files):
# XXX doesn't support '\n' or '\r' in filenames
l = fp.readline()
try:
name, size = l.split('\0', 1)
size = int(size)
except (ValueError, TypeError):
raise error.ResponseError(
_('unexpected response from remote server:'), l)
if self.ui.debugflag:
self.ui.debug('adding %s (%s)\n' %
(name, util.bytecount(size)))
# for backwards compat, name was partially encoded
ofp = self.sopener(store.decodedir(name), 'w')
for chunk in util.filechunkiter(fp, limit=size):
handled_bytes += len(chunk)
self.ui.progress(_('clone'), handled_bytes,
total=total_bytes)
ofp.write(chunk)
ofp.close()
elapsed = time.time() - start
if elapsed <= 0:
elapsed = 0.001
self.ui.progress(_('clone'), None)
self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
(util.bytecount(total_bytes), elapsed,
util.bytecount(total_bytes / elapsed)))
# new requirements = old non-format requirements +
# new format-related
# requirements from the streamed-in repository
requirements.update(set(self.requirements) - self.supportedformats)
self._applyrequirements(requirements)
self._writerequirements()
if rbranchmap:
rbheads = []
for bheads in rbranchmap.itervalues():
rbheads.extend(bheads)
if rbheads:
rtiprev = max((int(self.changelog.rev(node))
for node in rbheads))
cache = branchmap.branchcache(rbranchmap,
self[rtiprev].node(),
rtiprev)
# Try to stick it as low as possible
# filters above 'served' are unlikely to be fetched from a clone
for candidate in ('base', 'immutable', 'served'):
rview = self.filtered(candidate)
if cache.validfor(rview):
self._branchcaches[candidate] = cache
cache.write(rview)
break
self.invalidate()
return len(self.heads()) + 1
finally:
lock.release()
def clone(self, remote, heads=[], stream=False):
'''clone remote repository.
keyword arguments:
heads: list of revs to clone (forces use of pull)
stream: use streaming clone if possible'''
# now, all clients that can request uncompressed clones can
# read repo formats supported by all servers that can serve
# them.
# if revlog format changes, client will have to check version
# and format flags on "stream" capability, and use
# uncompressed only if compatible.
if not stream:
# if the server explicitly prefers to stream (for fast LANs)
stream = remote.capable('stream-preferred')
if stream and not heads:
# 'stream' means remote revlog format is revlogv1 only
if remote.capable('stream'):
return self.stream_in(remote, set(('revlogv1',)))
# otherwise, 'streamreqs' contains the remote revlog format
streamreqs = remote.capable('streamreqs')
if streamreqs:
streamreqs = set(streamreqs.split(','))
# if we support it, stream in and adjust our requirements
if not streamreqs - self.supportedformats:
return self.stream_in(remote, streamreqs)
return self.pull(remote, heads)
def pushkey(self, namespace, key, old, new):
self.hook('prepushkey', throw=True, namespace=namespace, key=key,
old=old, new=new)
self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
ret = pushkey.push(self, namespace, key, old, new)
self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
ret=ret)
return ret
def listkeys(self, namespace):
self.hook('prelistkeys', throw=True, namespace=namespace)
self.ui.debug('listing keys for "%s"\n' % namespace)
values = pushkey.list(self, namespace)
self.hook('listkeys', namespace=namespace, values=values)
return values
def debugwireargs(self, one, two, three=None, four=None, five=None):
'''used to test argument passing over the wire'''
return "%s %s %s %s %s" % (one, two, three, four, five)
def savecommitmessage(self, text):
fp = self.opener('last-message.txt', 'wb')
try:
fp.write(text)
finally:
fp.close()
return self.pathto(fp.name[len(self.root) + 1:])
# used to avoid circular references so destructors work
def aftertrans(files):
renamefiles = [tuple(t) for t in files]
def a():
for vfs, src, dest in renamefiles:
try:
vfs.rename(src, dest)
except OSError: # journal file does not yet exist
pass
return a
def undoname(fn):
base, name = os.path.split(fn)
assert name.startswith('journal')
return os.path.join(base, name.replace('journal', 'undo', 1))
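# Illustrative example (not in the original source): undoname maps a
# transaction journal file to its undo counterpart, e.g.
# undoname('/repo/.hg/journal.dirstate') -> '/repo/.hg/undo.dirstate'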
def instance(ui, path, create):
return localrepository(ui, util.urllocalpath(path), create)
def islocal(path):
return True
| apache-2.0 |
AHJenin/acm-type-problems | Timus/AC/1197-Lonesome_Knight.py | 1 | 1209 | #!/usr/bin/env python3
#
# FILE: 1197-Lonesome_Knight.py
#
# @author: Arafat Hasan Jenin <opendoor.arafat[at]gmail[dot]com>
#
# LINK:
#
# DATE CREATED: 15-06-18 15:40:41 (+06)
# LAST MODIFIED: 15-06-18 17:29:41 (+06)
#
# VERDICT: Accepted
#
# DEVELOPMENT HISTORY:
# Date Version Description
# --------------------------------------------------------------------
# 15-06-18 1.0 Deleted code is debugged code.
#
# _/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
# _/ _/ _/_/ _/ _/ _/_/ _/
# _/ _/_/_/ _/ _/ _/ _/ _/ _/ _/
# _/ _/ _/ _/ _/_/ _/ _/ _/_/
# _/_/ _/_/_/_/ _/ _/ _/_/_/ _/ _/
#
##############################################################################
dx = [2, 1, -1, -2, -2, -1, 1, 2]
dy = [1, 2, 2, 1, -1, -2, -2, -1] # Knight Direction
N = int(input())
while (N > 0):
N -= 1
pos = input()
x = ord(pos[0]) - ord('a')
y = int(pos[1]) - 1
ans = 0
for i in range(0, 8):
new_x = x + dx[i]
new_y = y + dy[i]
if new_x >= 0 and new_x < 8 and new_y >= 0 and new_y < 8:
ans += 1
print(ans)
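# Worked example (input format assumed: a count N, then N squares):
# for input "2", "a1", "d4" the program prints 2 and then 8 --
# a knight on a1 only reaches b3 and c2, while from d4 all 8 moves stay on board.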
| mit |
nikofil/invenio-search-ui | invenio_search_ui/views.py | 1 | 2169 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""UI for Invenio-Search."""
from __future__ import absolute_import, print_function
from flask import Blueprint, current_app, json, render_template
blueprint = Blueprint(
'invenio_search_ui',
__name__,
template_folder='templates',
static_folder='static',
)
@blueprint.route("/search")
def search():
"""Search page ui."""
return render_template(current_app.config['SEARCH_UI_SEARCH_TEMPLATE'])
def sorted_options(sort_options):
"""Sort sort options for display.
:param sort_options: A dictionary containing the field name as key and
asc/desc as value.
:returns: A dictionary with sorting options for Invenio-Search-JS.
"""
return [
{
'title': v['title'],
'value': ('-{0}'.format(k)
if v.get('default_order', 'asc') == 'desc' else k),
}
for k, v in
sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))
]
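# Illustration with a hypothetical config (not part of this module):
# sorted_options({'bestmatch': {'title': 'Best match', 'default_order': 'desc', 'order': 1},
# 'mostrecent': {'title': 'Most recent', 'order': 2}})
# returns [{'title': 'Best match', 'value': '-bestmatch'},
# {'title': 'Most recent', 'value': 'mostrecent'}]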
@blueprint.app_template_filter('format_sortoptions')
def format_sortoptions(sort_options):
"""Create sort options JSON dump for Invenio-Search-JS."""
return json.dumps({
'options': sorted_options(sort_options)
})
| gpl-2.0 |
birdonwheels5/p2pool-myrGroestl | p2pool/util/deferral.py | 233 | 8790 | from __future__ import division
import itertools
import random
import sys
from twisted.internet import defer, reactor
from twisted.python import failure, log
def sleep(t):
d = defer.Deferred(canceller=lambda d_: dc.cancel())
dc = reactor.callLater(t, d.callback, None)
return d
def run_repeatedly(f, *args, **kwargs):
current_dc = [None]
def step():
delay = f(*args, **kwargs)
current_dc[0] = reactor.callLater(delay, step)
step()
def stop():
current_dc[0].cancel()
return stop
class RetrySilentlyException(Exception):
pass
def retry(message='Error:', delay=3, max_retries=None, traceback=True):
'''
@retry('Error getting block:', 1)
@defer.inlineCallbacks
def get_block(hash):
...
'''
def retry2(func):
@defer.inlineCallbacks
def f(*args, **kwargs):
for i in itertools.count():
try:
result = yield func(*args, **kwargs)
except Exception, e:
if i == max_retries:
raise
if not isinstance(e, RetrySilentlyException):
if traceback:
log.err(None, message)
else:
print >>sys.stderr, message, e
yield sleep(delay)
else:
defer.returnValue(result)
return f
return retry2
class ReplyMatcher(object):
'''
Converts request/got response interface to deferred interface
'''
def __init__(self, func, timeout=5):
self.func = func
self.timeout = timeout
self.map = {}
def __call__(self, id):
if id not in self.map:
self.func(id)
df = defer.Deferred()
def timeout():
self.map[id].remove((df, timer))
if not self.map[id]:
del self.map[id]
df.errback(failure.Failure(defer.TimeoutError('in ReplyMatcher')))
timer = reactor.callLater(self.timeout, timeout)
self.map.setdefault(id, set()).add((df, timer))
return df
def got_response(self, id, resp):
if id not in self.map:
return
for df, timer in self.map.pop(id):
df.callback(resp)
timer.cancel()
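# Usage sketch (send_request is a hypothetical one-argument transport call):
# matcher = ReplyMatcher(send_request)
# df = matcher(request_id) # sends the request, returns a Deferred
# matcher.got_response(request_id, payload) # fires df with payload
# Requests left unanswered for `timeout` seconds errback with TimeoutError.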
class GenericDeferrer(object):
'''
Converts query with identifier/got response interface to deferred interface
'''
def __init__(self, max_id, func, timeout=5, on_timeout=lambda: None):
self.max_id = max_id
self.func = func
self.timeout = timeout
self.on_timeout = on_timeout
self.map = {}
def __call__(self, *args, **kwargs):
while True:
id = random.randrange(self.max_id)
if id not in self.map:
break
def cancel(df):
df, timer = self.map.pop(id)
timer.cancel()
try:
df = defer.Deferred(cancel)
except TypeError:
df = defer.Deferred() # handle older versions of Twisted
def timeout():
self.map.pop(id)
df.errback(failure.Failure(defer.TimeoutError('in GenericDeferrer')))
self.on_timeout()
timer = reactor.callLater(self.timeout, timeout)
self.map[id] = df, timer
self.func(id, *args, **kwargs)
return df
def got_response(self, id, resp):
if id not in self.map:
return
df, timer = self.map.pop(id)
timer.cancel()
df.callback(resp)
def respond_all(self, resp):
while self.map:
id, (df, timer) = self.map.popitem()
timer.cancel()
df.errback(resp)
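# Usage sketch (transport.send is hypothetical): wrap an id-tagged query so
# each call returns a Deferred that fires when the matching reply arrives:
# query = GenericDeferrer(2**32, lambda id, msg: transport.send(id, msg))
# df = query('ping') # picks a free id and sends
# query.got_response(id, 'pong') # fires df with 'pong'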
class NotNowError(Exception):
pass
class DeferredCacher(object):
'''
like memoize, but for functions that return Deferreds
@DeferredCacher
def f(x):
...
return df
@DeferredCacher.with_backing(bsddb.hashopen(...))
def f(x):
...
return df
'''
@classmethod
def with_backing(cls, backing):
return lambda func: cls(func, backing)
def __init__(self, func, backing=None):
if backing is None:
backing = {}
self.func = func
self.backing = backing
self.waiting = {}
@defer.inlineCallbacks
def __call__(self, key):
if key in self.waiting:
yield self.waiting[key]
if key in self.backing:
defer.returnValue(self.backing[key])
else:
self.waiting[key] = defer.Deferred()
try:
value = yield self.func(key)
finally:
self.waiting.pop(key).callback(None)
self.backing[key] = value
defer.returnValue(value)
_nothing = object()
def call_now(self, key, default=_nothing):
if key in self.backing:
return self.backing[key]
if key not in self.waiting:
self.waiting[key] = defer.Deferred()
def cb(value):
self.backing[key] = value
self.waiting.pop(key).callback(None)
def eb(fail):
self.waiting.pop(key).callback(None)
if fail.check(RetrySilentlyException):
return
print
print 'Error when requesting noncached value:'
fail.printTraceback()
print
self.func(key).addCallback(cb).addErrback(eb)
if default is not self._nothing:
return default
raise NotNowError(key)
def deferred_has_been_called(df):
still_running = True
res2 = []
def cb(res):
if still_running:
res2[:] = [res]
else:
return res
df.addBoth(cb)
still_running = False
if res2:
return True, res2[0]
return False, None
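# Example: deferred_has_been_called(defer.succeed(5)) returns (True, 5),
# while a Deferred that has not fired yet yields (False, None).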
def inlineCallbacks(f):
from functools import wraps
@wraps(f)
def _(*args, **kwargs):
gen = f(*args, **kwargs)
stop_running = [False]
def cancelled(df_):
assert df_ is df
stop_running[0] = True
if currently_waiting_on:
currently_waiting_on[0].cancel()
df = defer.Deferred(cancelled)
currently_waiting_on = []
def it(cur):
while True:
try:
if isinstance(cur, failure.Failure):
res = cur.throwExceptionIntoGenerator(gen) # external code is run here
else:
res = gen.send(cur) # external code is run here
if stop_running[0]:
return
except StopIteration:
df.callback(None)
except defer._DefGen_Return as e:
# XXX should make sure direct child threw
df.callback(e.value)
except:
df.errback()
else:
if isinstance(res, defer.Deferred):
called, res2 = deferred_has_been_called(res)
if called:
cur = res2
continue
else:
currently_waiting_on[:] = [res]
def gotResult(res2):
assert currently_waiting_on[0] is res
currently_waiting_on[:] = []
if stop_running[0]:
return
it(res2)
res.addBoth(gotResult) # external code is run between this and gotResult
else:
cur = res
continue
break
it(None)
return df
return _
class RobustLoopingCall(object):
def __init__(self, func, *args, **kwargs):
self.func, self.args, self.kwargs = func, args, kwargs
self.running = False
def start(self, period):
assert not self.running
self.running = True
self._df = self._worker(period).addErrback(lambda fail: fail.trap(defer.CancelledError))
@inlineCallbacks
def _worker(self, period):
assert self.running
while self.running:
try:
self.func(*self.args, **self.kwargs)
except:
log.err()
yield sleep(period)
def stop(self):
assert self.running
self.running = False
self._df.cancel()
return self._df
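# Usage sketch (poll_peers is a hypothetical zero-argument callable):
# lc = RobustLoopingCall(poll_peers)
# lc.start(30) # run every 30 seconds; exceptions are logged, not fatal
# lc.stop() # returns a Deferred that fires once the loop exits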
| gpl-3.0 |
dmilith/SublimeText3-dmilith | Packages/pyte/all/pyte/screens.py | 1 | 46773 | # -*- coding: utf-8 -*-
"""
pyte.screens
~~~~~~~~~~~~
This module provides classes for terminal screens, currently
it contains three screens with different features:
* :class:`~pyte.screens.Screen` -- base screen implementation,
which handles all the core escape sequences, recognized by
:class:`~pyte.streams.Stream`.
* If you need a screen to keep track of the changed lines
(which you probably do need) -- use
:class:`~pyte.screens.DiffScreen`.
* If you also want a screen to collect history and allow
pagination -- :class:`pyte.screen.HistoryScreen` is here
for ya ;)
.. note:: It would be nice to split those features into mixin
classes, rather than subclasses, but it's not obvious
how to do so -- feel free to submit a pull request.
:copyright: (c) 2011-2012 by Selectel.
:copyright: (c) 2012-2017 by pyte authors and contributors,
see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals, division
import copy
import json
import math
import os
import sys
import unicodedata
import warnings
from collections import deque, namedtuple, defaultdict
from wcwidth import wcwidth
# There is no standard 2.X backport for ``lru_cache``.
if sys.version_info >= (3, 2):
from functools import lru_cache
wcwidth = lru_cache(maxsize=4096)(wcwidth)
from . import (
charsets as cs,
control as ctrl,
graphics as g,
modes as mo
)
from .compat import map, range, str
from .streams import Stream
#: A container for screen's scroll margins.
Margins = namedtuple("Margins", "top bottom")
#: A container for savepoint, created on :data:`~pyte.escape.DECSC`.
Savepoint = namedtuple("Savepoint", [
"cursor",
"g0_charset",
"g1_charset",
"charset",
"origin",
"wrap"
])
class Char(namedtuple("Char", [
"data",
"fg",
"bg",
"bold",
"italics",
"underscore",
"strikethrough",
"reverse",
])):
"""A single styled on-screen character.
:param str data: unicode character. Invariant: ``len(data) == 1``.
:param str fg: foreground colour. Defaults to ``"default"``.
:param str bg: background colour. Defaults to ``"default"``.
:param bool bold: flag for rendering the character using bold font.
Defaults to ``False``.
:param bool italics: flag for rendering the character using italic font.
Defaults to ``False``.
:param bool underscore: flag for rendering the character underlined.
Defaults to ``False``.
:param bool strikethrough: flag for rendering the character with a
strike-through line. Defaults to ``False``.
:param bool reverse: flag for swapping foreground and background colours
during rendering. Defaults to ``False``.
"""
__slots__ = ()
def __new__(cls, data, fg="default", bg="default", bold=False,
italics=False, underscore=False,
strikethrough=False, reverse=False):
return super(Char, cls).__new__(cls, data, fg, bg, bold, italics,
underscore, strikethrough, reverse)
class Cursor(object):
"""Screen cursor.
:param int x: 0-based horizontal cursor position.
:param int y: 0-based vertical cursor position.
:param pyte.screens.Char attrs: cursor attributes (see
:meth:`~pyte.screens.Screen.select_graphic_rendition`
for details).
"""
__slots__ = ("x", "y", "attrs", "hidden")
def __init__(self, x, y, attrs=Char(" ")):
self.x = x
self.y = y
self.attrs = attrs
self.hidden = False
class StaticDefaultDict(dict):
"""A :func:`dict` with a static default value.
Unlike :func:`collections.defaultdict` this implementation does not
implicitly update the mapping when queried with a missing key.
>>> d = StaticDefaultDict(42)
>>> d["foo"]
42
>>> d
{}
"""
def __init__(self, default):
self.default = default
def __missing__(self, key):
return self.default
class Screen(object):
"""
A screen is an in-memory matrix of characters that represents the
screen display of the terminal. It can be instantiated on its own
and given explicit commands, or it can be attached to a stream and
will respond to events.
.. attribute:: buffer
A sparse ``lines x columns`` :class:`~pyte.screens.Char` matrix.
.. attribute:: dirty
A set of line numbers, which should be re-drawn. The user is responsible
for clearing this set when changes have been applied.
>>> screen = Screen(80, 24)
>>> screen.dirty.clear()
>>> screen.draw("!")
>>> list(screen.dirty)
[0]
.. versionadded:: 0.7.0
.. attribute:: cursor
Reference to the :class:`~pyte.screens.Cursor` object, holding
cursor position and attributes.
.. attribute:: margins
Margins determine which screen lines move during scrolling
(see :meth:`index` and :meth:`reverse_index`). Characters added
outside the scrolling region do not cause the screen to scroll.
The value is ``None`` if margins are set to screen boundaries,
otherwise -- a pair 0-based top and bottom line indices.
.. attribute:: charset
Current charset number; can be either ``0`` or ``1`` for `G0`
and `G1` respectively, note that `G0` is activated by default.
.. note::
According to ``ECMA-48`` standard, **lines and columns are
1-indexed**, so, for instance ``ESC [ 10;10 f`` really means
-- move cursor to position (9, 9) in the display matrix.
.. versionchanged:: 0.4.7
.. warning::
:data:`~pyte.modes.LNM` is reset by default, to match VT220
specification. Unfortunately this makes :mod:`pyte` fail
``vttest`` for cursor movement.
.. versionchanged:: 0.4.8
.. warning::
If `DECAWM` mode is set than a cursor will be wrapped to the
**beginning** of the next line, which is the behaviour described
in ``man console_codes``.
.. seealso::
`Standard ECMA-48, Section 6.1.1 \
<http://ecma-international.org/publications/standards/Ecma-048.htm>`_
for a description of the presentational component, implemented
by ``Screen``.
"""
@property
def default_char(self):
"""An empty character with default foreground and background colors."""
reverse = mo.DECSCNM in self.mode
return Char(data=" ", fg="default", bg="default", reverse=reverse)
def __init__(self, columns, lines):
self.savepoints = []
self.columns = columns
self.lines = lines
self.buffer = defaultdict(lambda: StaticDefaultDict(self.default_char))
self.dirty = set()
self.reset()
def __repr__(self):
return ("{0}({1}, {2})".format(self.__class__.__name__,
self.columns, self.lines))
@property
def display(self):
"""A :func:`list` of screen lines as unicode strings."""
def render(line):
is_wide_char = False
for x in range(self.columns):
if is_wide_char: # Skip stub
is_wide_char = False
continue
char = line[x].data
assert sum(map(wcwidth, char[1:])) == 0
is_wide_char = wcwidth(char[0]) == 2
yield char
return ["".join(render(self.buffer[y])) for y in range(self.lines)]
def reset(self):
"""Reset the terminal to its initial state.
* Scrolling margins are reset to screen boundaries.
* Cursor is moved to home location -- ``(0, 0)`` and its
attributes are set to defaults (see :attr:`default_char`).
* Screen is cleared -- each character is reset to
:attr:`default_char`.
* Tabstops are reset to "every eight columns".
* All lines are marked as :attr:`dirty`.
.. note::
Neither VT220 nor VT102 manuals mention that terminal modes
and tabstops should be reset as well, thanks to
:manpage:`xterm` -- we now know that.
"""
self.dirty.update(range(self.lines))
self.buffer.clear()
self.margins = None
self.mode = set([mo.DECAWM, mo.DECTCEM])
self.title = ""
self.icon_name = ""
self.charset = 0
self.g0_charset = cs.LAT1_MAP
self.g1_charset = cs.VT100_MAP
# From ``man terminfo`` -- "... hardware tabs are initially
# set every `n` spaces when the terminal is powered up". Since
# we aim to support VT102 / VT220 and linux -- we use n = 8.
self.tabstops = set(range(8, self.columns, 8))
self.cursor = Cursor(0, 0)
self.cursor_position()
self.saved_columns = None
def resize(self, lines=None, columns=None):
"""Resize the screen to the given size.
If the requested screen size has more lines than the existing
screen, lines will be added at the bottom. If the requested
size has fewer lines than the existing screen, lines will be
clipped at the top of the screen. Similarly, if the existing
screen has fewer columns than the requested screen, columns will
be added at the right, and if it has more -- columns will be
clipped at the right.
:param int lines: number of lines in the new screen.
:param int columns: number of columns in the new screen.
.. versionchanged:: 0.7.0
If the requested screen size is identical to the current screen
size, the method does nothing.
"""
lines = lines or self.lines
columns = columns or self.columns
if lines == self.lines and columns == self.columns:
return # No changes.
self.dirty.update(range(lines))
if lines < self.lines:
self.save_cursor()
self.cursor_position(0, 0)
self.delete_lines(self.lines - lines) # Drop from the top.
self.restore_cursor()
if columns < self.columns:
for line in self.buffer.values():
for x in range(columns, self.columns):
line.pop(x, None)
self.lines, self.columns = lines, columns
self.set_margins()
def set_margins(self, top=None, bottom=None):
"""Select top and bottom margins for the scrolling region.
:param int top: the smallest line number that is scrolled.
:param int bottom: the biggest line number that is scrolled.
"""
if top is None and bottom is None:
self.margins = None
return
margins = self.margins or Margins(0, self.lines - 1)
# Arguments are 1-based, while :attr:`margins` are zero
# based -- so we have to decrement them by one. We also
# make sure that both of them are bounded by [0, lines - 1].
if top is None:
top = margins.top
else:
top = max(0, min(top - 1, self.lines - 1))
if bottom is None:
bottom = margins.bottom
else:
bottom = max(0, min(bottom - 1, self.lines - 1))
# Even though VT102 and VT220 require DECSTBM to ignore
# regions of width less than 2, some programs (like aptitude
# for example) rely on it. Practicality beats purity.
if bottom - top >= 1:
self.margins = Margins(top, bottom)
# The cursor moves to the home position when the top and
# bottom margins of the scrolling region (DECSTBM) changes.
self.cursor_position()
def set_mode(self, *modes, **kwargs):
"""Set (enable) a given list of modes.
:param list modes: modes to set, where each mode is a constant
from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from
# non-private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.update(modes)
# When DECOLM mode is set, the screen is erased and the cursor
# moves to the home position.
if mo.DECCOLM in modes:
self.saved_columns = self.columns
self.resize(columns=132)
self.erase_in_display(2)
self.cursor_position()
# According to VT520 manual, DECOM should also home the cursor.
if mo.DECOM in modes:
self.cursor_position()
# Mark all displayed characters as reverse.
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=True)
self.select_graphic_rendition(7) # +reverse.
# Make the cursor visible.
if mo.DECTCEM in modes:
self.cursor.hidden = False
def reset_mode(self, *modes, **kwargs):
"""Reset (disable) a given list of modes.
:param list modes: modes to reset -- hopefully, each mode is a
constant from :mod:`pyte.modes`.
"""
# Private mode codes are shifted, to be distinguished from
# non-private ones.
if kwargs.get("private"):
modes = [mode << 5 for mode in modes]
if mo.DECSCNM in modes:
self.dirty.update(range(self.lines))
self.mode.difference_update(modes)
# Lines below follow the logic in :meth:`set_mode`.
if mo.DECCOLM in modes:
if self.columns == 132 and self.saved_columns is not None:
self.resize(columns=self.saved_columns)
self.saved_columns = None
self.erase_in_display(2)
self.cursor_position()
if mo.DECOM in modes:
self.cursor_position()
if mo.DECSCNM in modes:
for line in self.buffer.values():
line.default = self.default_char
for x in line:
line[x] = line[x]._replace(reverse=False)
self.select_graphic_rendition(27) # -reverse.
# Hide the cursor.
if mo.DECTCEM in modes:
self.cursor.hidden = True
def define_charset(self, code, mode):
"""Define ``G0`` or ``G1`` charset.
:param str code: character set code, should be a character
from ``"B0UK"``, otherwise ignored.
:param str mode: if ``"("`` ``G0`` charset is defined, if
``")"`` -- we operate on ``G1``.
.. warning:: User-defined charsets are currently not supported.
"""
if code in cs.MAPS:
if mode == "(":
self.g0_charset = cs.MAPS[code]
elif mode == ")":
self.g1_charset = cs.MAPS[code]
def shift_in(self):
"""Select ``G0`` character set."""
self.charset = 0
def shift_out(self):
"""Select ``G1`` character set."""
self.charset = 1
def draw(self, data):
"""Display decoded characters at the current cursor position and
advance the cursor if :data:`~pyte.modes.DECAWM` is set.
:param str data: text to display.
.. versionchanged:: 0.5.0
Character width is taken into account. Specifically, zero-width
and unprintable characters do not affect screen state. Full-width
characters are rendered into two consecutive character containers.
"""
data = data.translate(
self.g1_charset if self.charset else self.g0_charset)
for char in data:
char_width = wcwidth(char)
# If this was the last column in a line and auto wrap mode is
# enabled, move the cursor to the beginning of the next line,
# otherwise replace characters already displayed with newly
# entered.
if self.cursor.x == self.columns:
if mo.DECAWM in self.mode:
self.dirty.add(self.cursor.y)
self.carriage_return()
self.linefeed()
elif char_width > 0:
self.cursor.x -= char_width
# If Insert mode is set, new characters move old characters to
# the right, otherwise terminal is in Replace mode and new
# characters replace old characters at cursor position.
if mo.IRM in self.mode and char_width > 0:
self.insert_characters(char_width)
line = self.buffer[self.cursor.y]
if char_width == 1:
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
elif char_width == 2:
# A two-cell character has a stub slot after it.
line[self.cursor.x] = self.cursor.attrs._replace(data=char)
if self.cursor.x + 1 < self.columns:
line[self.cursor.x + 1] = self.cursor.attrs \
._replace(data="")
elif char_width == 0 and unicodedata.combining(char):
# A zero-cell character is combined with the previous
# character either on this or the preceding line.
if self.cursor.x:
last = line[self.cursor.x - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
line[self.cursor.x - 1] = last._replace(data=normalized)
elif self.cursor.y:
last = self.buffer[self.cursor.y - 1][self.columns - 1]
normalized = unicodedata.normalize("NFC", last.data + char)
self.buffer[self.cursor.y - 1][self.columns - 1] = \
last._replace(data=normalized)
else:
break # Unprintable character or doesn't advance the cursor.
# .. note:: We can't use :meth:`cursor_forward()`, because that
# way, we'll never know when to linefeed.
if char_width > 0:
self.cursor.x = min(self.cursor.x + char_width, self.columns)
self.dirty.add(self.cursor.y)
def set_title(self, param):
"""Set terminal title.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.title = param
def set_icon_name(self, param):
"""Set icon name.
.. note:: This is an XTerm extension supported by the Linux terminal.
"""
self.icon_name = param
def carriage_return(self):
"""Move the cursor to the beginning of the current line."""
self.cursor.x = 0
def index(self):
"""Move the cursor down one line in the same column. If the
cursor is at the last line, create a new line at the bottom.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(top, bottom):
self.buffer[y] = self.buffer[y + 1]
self.buffer.pop(bottom, None)
else:
self.cursor_down()
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up()
def linefeed(self):
"""Perform an index and, if :data:`~pyte.modes.LNM` is set, a
carriage return.
"""
self.index()
if mo.LNM in self.mode:
self.carriage_return()
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.cursor.x < stop:
column = stop
break
else:
column = self.columns - 1
self.cursor.x = column
def backspace(self):
"""Move cursor to the left one or keep it in its position if
it's at the beginning of the line already.
"""
self.cursor_back()
def save_cursor(self):
"""Push the current cursor position onto the stack."""
self.savepoints.append(Savepoint(copy.copy(self.cursor),
self.g0_charset,
self.g1_charset,
self.charset,
mo.DECOM in self.mode,
mo.DECAWM in self.mode))
def restore_cursor(self):
"""Set the current cursor position to whatever cursor is on top
of the stack.
"""
if self.savepoints:
savepoint = self.savepoints.pop()
self.g0_charset = savepoint.g0_charset
self.g1_charset = savepoint.g1_charset
self.charset = savepoint.charset
if savepoint.origin:
self.set_mode(mo.DECOM)
if savepoint.wrap:
self.set_mode(mo.DECAWM)
self.cursor = savepoint.cursor
self.ensure_hbounds()
self.ensure_vbounds(use_margins=True)
else:
# If nothing was saved, the cursor moves to home position;
# origin mode is reset. :todo: DECAWM?
self.reset_mode(mo.DECOM)
self.cursor_position()
def insert_lines(self, count=None):
"""Insert the indicated # of lines at line with cursor. Lines
displayed **at** and below the cursor move down. Lines moved
past the bottom margin are lost.
:param count: number of lines to insert.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
# If cursor is outside scrolling margins -- do nothin'.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(bottom, self.cursor.y - 1, -1):
if y + count <= bottom and y in self.buffer:
self.buffer[y + count] = self.buffer[y]
self.buffer.pop(y, None)
self.carriage_return()
def delete_lines(self, count=None):
"""Delete the indicated # of lines, starting at line with
cursor. As lines are deleted, lines displayed below cursor
move up. Lines added to bottom of screen have spaces with same
character attributes as last line moved up.
:param int count: number of lines to delete.
"""
count = count or 1
top, bottom = self.margins or Margins(0, self.lines - 1)
# If cursor is outside scrolling margins -- do nothin'.
if top <= self.cursor.y <= bottom:
self.dirty.update(range(self.cursor.y, self.lines))
for y in range(self.cursor.y, bottom + 1):
if y + count <= bottom:
if y + count in self.buffer:
self.buffer[y] = self.buffer.pop(y + count)
else:
self.buffer.pop(y, None)
self.carriage_return()
def insert_characters(self, count=None):
"""Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.columns, self.cursor.x - 1, -1):
if x + count <= self.columns:
line[x + count] = line[x]
line.pop(x, None)
def delete_characters(self, count=None):
"""Delete the indicated # of characters, starting with the
character at cursor position. When a character is deleted, all
characters to the right of cursor move left. Character attributes
move with the characters.
:param int count: number of characters to delete.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x, self.columns):
if x + count <= self.columns:
line[x] = line.pop(x + count, self.default_char)
else:
line.pop(x, None)
def erase_characters(self, count=None):
"""Erase the indicated # of characters, starting with the
character at cursor position. Character attributes are set
cursor attributes. The cursor remains in the same position.
:param int count: number of characters to erase.
.. note::
Using cursor attributes for character attributes may seem
illogical, but if recall that a terminal emulator emulates
a type writer, it starts to make sense. The only way a type
writer could erase a character is by typing over it.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.cursor.x,
min(self.cursor.x + count, self.columns)):
line[x] = self.cursor.attrs
def erase_in_line(self, how=0, private=False):
"""Erase a line in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of line, including cursor
position.
* ``1`` -- Erases from beginning of line to cursor,
including cursor position.
* ``2`` -- Erases complete line.
:param bool private: when ``True`` only characters marked as
eraseable are affected **not implemented**.
"""
self.dirty.add(self.cursor.y)
if how == 0:
interval = range(self.cursor.x, self.columns)
elif how == 1:
interval = range(self.cursor.x + 1)
elif how == 2:
interval = range(self.columns)
line = self.buffer[self.cursor.y]
for x in interval:
line[x] = self.cursor.attrs
def erase_in_display(self, how=0, private=False):
"""Erases display in a specific way.
Character attributes are set to cursor attributes.
:param int how: defines the way the line should be erased in:
* ``0`` -- Erases from cursor to end of screen, including
cursor position.
* ``1`` -- Erases from beginning of screen to cursor,
including cursor position.
* ``2`` and ``3`` -- Erases complete display. All lines
are erased and changed to single-width. Cursor does not
move.
:param bool private: when ``True`` only characters marked as
eraseable are affected **not implemented**.
"""
if how == 0:
interval = range(self.cursor.y + 1, self.lines)
elif how == 1:
interval = range(self.cursor.y)
elif how == 2 or how == 3:
interval = range(self.lines)
self.dirty.update(interval)
for y in interval:
line = self.buffer[y]
for x in line:
line[x] = self.cursor.attrs
if how == 0 or how == 1:
self.erase_in_line(how)
def set_tab_stop(self):
"""Set a horizontal tab stop at cursor position."""
self.tabstops.add(self.cursor.x)
def clear_tab_stop(self, how=0):
"""Clear a horizontal tab stop.
:param int how: defines a way the tab stop should be cleared:
* ``0`` or nothing -- Clears a horizontal tab stop at cursor
position.
* ``3`` -- Clears all horizontal tab stops.
"""
if how == 0:
# Clears a horizontal tab stop at cursor position, if it's
# present, or silently fails if otherwise.
self.tabstops.discard(self.cursor.x)
elif how == 3:
self.tabstops = set() # Clears all horizontal tab stops.
def ensure_hbounds(self):
"""Ensure the cursor is within horizontal screen bounds."""
self.cursor.x = min(max(0, self.cursor.x), self.columns - 1)
def ensure_vbounds(self, use_margins=None):
"""Ensure the cursor is within vertical screen bounds.
:param bool use_margins: when ``True`` or when
:data:`~pyte.modes.DECOM` is set,
cursor is bounded by top and bottom
margins, instead of ``[0; lines - 1]``.
"""
if (use_margins or mo.DECOM in self.mode) and self.margins is not None:
top, bottom = self.margins
else:
top, bottom = 0, self.lines - 1
self.cursor.y = min(max(top, self.cursor.y), bottom)
def cursor_up(self, count=None):
"""Move cursor up the indicated # of lines in same column.
Cursor stops at top margin.
:param int count: number of lines to skip.
"""
top, _bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = max(self.cursor.y - (count or 1), top)
def cursor_up1(self, count=None):
"""Move cursor up the indicated # of lines to column 1. Cursor
stops at top margin.
:param int count: number of lines to skip.
"""
self.cursor_up(count)
self.carriage_return()
def cursor_down(self, count=None):
"""Move cursor down the indicated # of lines in same column.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
_top, bottom = self.margins or Margins(0, self.lines - 1)
self.cursor.y = min(self.cursor.y + (count or 1), bottom)
def cursor_down1(self, count=None):
"""Move cursor down the indicated # of lines to column 1.
Cursor stops at bottom margin.
:param int count: number of lines to skip.
"""
self.cursor_down(count)
self.carriage_return()
def cursor_back(self, count=None):
"""Move cursor left the indicated # of columns. Cursor stops
at left margin.
:param int count: number of columns to skip.
"""
# Handle the case when we've just drawn in the last column
# and would wrap the line on the next :meth:`draw()` call.
if self.cursor.x == self.columns:
self.cursor.x -= 1
self.cursor.x -= count or 1
self.ensure_hbounds()
def cursor_forward(self, count=None):
"""Move cursor right the indicated # of columns. Cursor stops
at right margin.
:param int count: number of columns to skip.
"""
self.cursor.x += count or 1
self.ensure_hbounds()
def cursor_position(self, line=None, column=None):
"""Set the cursor to a specific `line` and `column`.
Cursor is allowed to move out of the scrolling region only when
:data:`~pyte.modes.DECOM` is reset, otherwise -- the position
doesn't change.
:param int line: line number to move the cursor to.
:param int column: column number to move the cursor to.
"""
column = (column or 1) - 1
line = (line or 1) - 1
# If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
if self.margins is not None and mo.DECOM in self.mode:
line += self.margins.top
# Cursor is not allowed to move out of the scrolling region.
if not self.margins.top <= line <= self.margins.bottom:
return
self.cursor.x = column
self.cursor.y = line
self.ensure_hbounds()
self.ensure_vbounds()
def cursor_to_column(self, column=None):
"""Move cursor to a specific column in the current line.
:param int column: column number to move the cursor to.
"""
self.cursor.x = (column or 1) - 1
self.ensure_hbounds()
def cursor_to_line(self, line=None):
"""Move cursor to a specific line in the current column.
:param int line: line number to move the cursor to.
"""
self.cursor.y = (line or 1) - 1
# If origin mode (DECOM) is set, line numbers are relative to
# the top scrolling margin.
if mo.DECOM in self.mode:
self.cursor.y += self.margins.top
# FIXME: should we also restrict the cursor to the scrolling
# region?
self.ensure_vbounds()
def bell(self, *args):
"""Bell stub -- the actual implementation should probably be
provided by the end-user.
"""
def alignment_display(self):
"""Fills screen with uppercase E's for screen focus and alignment."""
self.dirty.update(range(self.lines))
for y in range(self.lines):
for x in range(self.columns):
self.buffer[y][x] = self.buffer[y][x]._replace(data="E")
def select_graphic_rendition(self, *attrs):
"""Set display attributes.
:param list attrs: a list of display attributes to set.
"""
replace = {}
# Fast path for resetting all attributes.
if not attrs or attrs == (0, ):
self.cursor.attrs = self.default_char
return
else:
attrs = list(reversed(attrs))
while attrs:
attr = attrs.pop()
if attr == 0:
# Reset all attributes.
replace.update(self.default_char._asdict())
elif attr in g.FG_ANSI:
replace["fg"] = g.FG_ANSI[attr]
elif attr in g.BG:
replace["bg"] = g.BG_ANSI[attr]
elif attr in g.TEXT:
attr = g.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif attr in g.FG_AIXTERM:
replace.update(fg=g.FG_AIXTERM[attr], bold=True)
elif attr in g.BG_AIXTERM:
replace.update(bg=g.BG_AIXTERM[attr], bold=True)
elif attr in (g.FG_256, g.BG_256):
key = "fg" if attr == g.FG_256 else "bg"
try:
n = attrs.pop()
if n == 5: # 256.
m = attrs.pop()
replace[key] = g.FG_BG_256[m]
elif n == 2: # 24bit.
# This is somewhat non-standard but is nonetheless
# supported in quite a few terminals. See discussion
# here https://gist.github.com/XVilka/8346728.
replace[key] = "{0:02x}{1:02x}{2:02x}".format(
attrs.pop(), attrs.pop(), attrs.pop())
except IndexError:
pass
self.cursor.attrs = self.cursor.attrs._replace(**replace)
def report_device_attributes(self, mode=0, **kwargs):
"""Report terminal identity.
.. versionadded:: 0.5.0
.. versionchanged:: 0.7.0
If ``private`` keyword argument is set, the method does nothing.
This behaviour is consistent with VT220 manual.
"""
# We only implement "primary" DA which is the only DA request
# VT102 understood, see ``VT102ID`` in ``linux/drivers/tty/vt.c``.
if mode == 0 and not kwargs.get("private"):
self.write_process_input(ctrl.CSI + "?6c")
def report_device_status(self, mode):
"""Report terminal status or cursor position.
:param int mode: if 5 -- terminal status, 6 -- cursor position,
otherwise a noop.
.. versionadded:: 0.5.0
"""
if mode == 5: # Request for terminal status.
self.write_process_input(ctrl.CSI + "0n")
elif mode == 6: # Request for cursor position.
x = self.cursor.x + 1
y = self.cursor.y + 1
# "Origin mode (DECOM) selects line numbering."
if mo.DECOM in self.mode:
y -= self.margins.top
self.write_process_input(ctrl.CSI + "{0};{1}R".format(y, x))
def write_process_input(self, data):
"""Write data to the process running inside the terminal.
By default is a noop.
:param str data: text to write to the process ``stdin``.
.. versionadded:: 0.5.0
"""
def debug(self, *args, **kwargs):
"""Endpoint for unrecognized escape sequences.
By default is a noop.
"""
class DiffScreen(Screen):
"""
A screen subclass, which maintains a set of dirty lines in its
:attr:`dirty` attribute. The end user is responsible for emptying
the set when a diff is applied.
.. deprecated:: 0.7.0
The functionality contained in this class has been merged into
:class:`~pyte.screens.Screen` and will be removed in 0.8.0.
Please update your code accordingly.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"The functionality of ``DiffScreen` has been merged into "
"``Screen`` and will be removed in 0.8.0. Please update "
"your code accordingly.", DeprecationWarning)
super(DiffScreen, self).__init__(*args, **kwargs)
History = namedtuple("History", "top bottom ratio size position")
class HistoryScreen(Screen):
"""A :class:~`pyte.screens.Screen` subclass, which keeps track
of screen history and allows pagination. This is not linux-specific,
but still useful; see page 462 of VT520 User's Manual.
:param int history: total number of history lines to keep; it is split
between top and bottom queues.
:param float ratio: defines the fraction of the screen to scroll on
:meth:`next_page` and :meth:`prev_page` calls.
.. attribute:: history
A pair of history queues for top and bottom margins accordingly;
here's the overall screen structure::
[ 1: .......]
[ 2: .......] <- top history
[ 3: .......]
------------
[ 4: .......] s
[ 5: .......] c
[ 6: .......] r
[ 7: .......] e
[ 8: .......] e
[ 9: .......] n
------------
[10: .......]
[11: .......] <- bottom history
[12: .......]
.. note::
Don't forget to update :class:`~pyte.streams.Stream` class with
appropriate escape sequences -- you can use any, since pagination
protocol is not standardized, for example::
Stream.escape["N"] = "next_page"
Stream.escape["P"] = "prev_page"
"""
_wrapped = set(Stream.events)
_wrapped.update(["next_page", "prev_page"])
def __init__(self, columns, lines, history=100, ratio=.5):
self.history = History(deque(maxlen=history),
deque(maxlen=history),
float(ratio),
history,
history)
super(HistoryScreen, self).__init__(columns, lines)
def _make_wrapper(self, event, handler):
def inner(*args, **kwargs):
self.before_event(event)
result = handler(*args, **kwargs)
self.after_event(event)
return result
return inner
def __getattribute__(self, attr):
value = super(HistoryScreen, self).__getattribute__(attr)
if attr in HistoryScreen._wrapped:
return HistoryScreen._make_wrapper(self, attr, value)
else:
return value
def before_event(self, event):
"""Ensure a screen is at the bottom of the history buffer.
:param str event: event name, for example ``"linefeed"``.
"""
if event not in ["prev_page", "next_page"]:
while self.history.position < self.history.size:
self.next_page()
def after_event(self, event):
"""Ensure all lines on a screen have proper width (:attr:`columns`).
Extra characters are truncated, missing characters are filled
with whitespace.
:param str event: event name, for example ``"linefeed"``.
"""
if event in ["prev_page", "next_page"]:
for line in self.buffer.values():
for x in list(line):  # iterate over a copy -- line.pop() mutates the dict
if x > self.columns:
line.pop(x)
# If we're at the bottom of the history buffer and `DECTCEM`
# mode is set -- show the cursor.
self.cursor.hidden = not (
self.history.position == self.history.size and
mo.DECTCEM in self.mode
)
def _reset_history(self):
self.history.top.clear()
self.history.bottom.clear()
self.history = self.history._replace(position=self.history.size)
def reset(self):
"""Overloaded to reset screen history state: history position
is reset to bottom of both queues; queues themselves are
emptied.
"""
super(HistoryScreen, self).reset()
self._reset_history()
def erase_in_display(self, how=0):
"""Overloaded to reset history state."""
super(HistoryScreen, self).erase_in_display(how)
if how == 3:
self._reset_history()
def index(self):
"""Overloaded to update top history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == bottom:
self.history.top.append(self.buffer[top])
super(HistoryScreen, self).index()
def reverse_index(self):
"""Overloaded to update bottom history with the removed lines."""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
self.history.bottom.append(self.buffer[bottom])
super(HistoryScreen, self).reverse_index()
def prev_page(self):
"""Move the screen page up through the history buffer. Page
size is defined by ``history.ratio``, so for instance
``ratio = .5`` means that half the screen is restored from
history on page switch.
"""
if self.history.position > self.lines and self.history.top:
mid = min(len(self.history.top),
int(math.ceil(self.lines * self.history.ratio)))
self.history.bottom.extendleft(
self.buffer[y]
for y in range(self.lines - 1, self.lines - mid - 1, -1))
self.history = self.history \
._replace(position=self.history.position - mid)
for y in range(self.lines - 1, mid - 1, -1):
self.buffer[y] = self.buffer[y - mid]
for y in range(mid - 1, -1, -1):
self.buffer[y] = self.history.top.pop()
self.dirty = set(range(self.lines))
def next_page(self):
"""Move the screen page down through the history buffer."""
if self.history.position < self.history.size and self.history.bottom:
mid = min(len(self.history.bottom),
int(math.ceil(self.lines * self.history.ratio)))
self.history.top.extend(self.buffer[y] for y in range(mid))
self.history = self.history \
._replace(position=self.history.position + mid)
for y in range(self.lines - mid):
self.buffer[y] = self.buffer[y + mid]
for y in range(self.lines - mid, self.lines):
self.buffer[y] = self.history.bottom.popleft()
self.dirty = set(range(self.lines))
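# Pagination sketch: once the (non-standard) escape sequences from the class
# docstring are wired into Stream, feeding "\x1bP" pages up through history
# and "\x1bN" pages down; prev_page()/next_page() also work when called
# directly on the screen.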
class DebugEvent(namedtuple("Event", "name args kwargs")):
"""Event dispatched to :class:`~pyte.screens.DebugScreen`.
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
@staticmethod
def from_string(line):
return DebugEvent(*json.loads(line))
def __str__(self):
return json.dumps(self)
def __call__(self, screen):
"""Execute this event on a given ``screen``."""
return getattr(screen, self.name)(*self.args, **self.kwargs)
class DebugScreen(object):
r"""A screen which dumps a subset of the received events to a file.
>>> import io
>>> with io.StringIO() as buf:
... stream = Stream(DebugScreen(to=buf))
... stream.feed("\x1b[1;24r\x1b[4l\x1b[24;1H\x1b[0;10m")
... print(buf.getvalue())
...
... # doctest: +NORMALIZE_WHITESPACE
["set_margins", [1, 24], {}]
["reset_mode", [4], {}]
["cursor_position", [24, 1], {}]
["select_graphic_rendition", [0, 10], {}]
:param file to: a file-like object to write debug information to.
:param list only: a list of events you want to debug (empty by
default, which means -- debug all events).
.. warning::
This is developer API with no backward compatibility guarantees.
Use at your own risk!
"""
def __init__(self, to=sys.stderr, only=()):
self.to = to
self.only = only
def only_wrapper(self, attr):
def wrapper(*args, **kwargs):
self.to.write(str(DebugEvent(attr, args, kwargs)))
self.to.write(str(os.linesep))
return wrapper
def __getattribute__(self, attr):
if attr not in Stream.events:
return super(DebugScreen, self).__getattribute__(attr)
elif not self.only or attr in self.only:
return self.only_wrapper(attr)
else:
return lambda *args, **kwargs: None
| mit |
zgchizi/oppia-uc | core/counters.py | 12 | 3320 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Services for performance counters."""
class PerfCounter(object):
"""Generic in-process numeric counter; not aggregated across instances."""
# TODO(sll): Add aggregation across instances.
def __init__(self, name, description):
if name in Registry.get_all_counter_names():
raise Exception('Counter %s already exists.' % name)
self._name = name
self._description = description
self._value = 0
Registry.register_counter(self.name, self)
def inc(self, increment=1):
"""Increments the counter value by a given increment."""
self._value += increment
@property
def name(self):
return self._name
@property
def description(self):
return self._description
@property
def value(self):
return self._value
class Registry(object):
"""Registry of all counters."""
_counters = {}
@classmethod
def get_all_counter_names(cls):
return cls._counters.keys()
@classmethod
def get_all_counters(cls):
return cls._counters.values()
@classmethod
def register_counter(cls, name, counter_class):
cls._counters[name] = counter_class
MEMCACHE_HIT = PerfCounter(
'memcache-hit',
'Number of times an object was found in memcache')
MEMCACHE_MISS = PerfCounter(
'memcache-miss',
'Number of times an object was not found in memcache')
MEMCACHE_SET_SUCCESS = PerfCounter(
'memcache-set-success',
'Number of times an object was successfully put in memcache')
MEMCACHE_SET_FAILURE = PerfCounter(
'memcache-set-failure',
'Number of times an object failed to be put in memcache')
MEMCACHE_DELETE_SUCCESS = PerfCounter(
'memcache-delete-success',
'Number of times an object was successfully deleted from memcache')
MEMCACHE_DELETE_MISSING = PerfCounter(
'memcache-delete-missing',
'Number of attempts to delete a non-existent object from memcache')
MEMCACHE_DELETE_FAILURE = PerfCounter(
'memcache-delete-failure',
'Number of times an object failed to be deleted from memcache')
HTML_RESPONSE_TIME_SECS = PerfCounter(
'html-response-time-secs',
'Total processing time for all HTML responses, in seconds')
HTML_RESPONSE_COUNT = PerfCounter(
'html-response-count',
'Number of times a HTML response was sent out')
JSON_RESPONSE_TIME_SECS = PerfCounter(
'json-response-time-secs',
'Total processing time for all JSON responses, in seconds')
JSON_RESPONSE_COUNT = PerfCounter(
'json-response-count',
'Number of times a JSON response was sent out')
EMAILS_SENT = PerfCounter(
'emails-sent',
'Number of times a call to send_mail() was made')
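# Illustrative usage (a sketch added for clarity; not part of the original
# module). A handler bumps the relevant counter when the event occurs, and
# the registry exposes every counter for reporting:
#
# MEMCACHE_HIT.inc()
# HTML_RESPONSE_COUNT.inc()
# HTML_RESPONSE_TIME_SECS.inc(increment=0.042)
# for counter in Registry.get_all_counters():
#     print('%s = %s' % (counter.name, counter.value))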
| apache-2.0 |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/generate_data.py | 1 | 1042 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:15:12 2016
@author: suraj
"""
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
# 3360 samples = 35 days x 96 quarter-hour slots; the attach rate itself is
# just uniform noise between 4 and 6.
attachRateList = []
for i in range(3360):
attachRateList.append(random.uniform(4,6))
attachRateList = np.array(attachRateList)
# Encode each signal with a discrete Fourier transform; the real and
# imaginary parts become separate features below.
encoded_attach_rate_list = np.fft.fft(attachRateList)
# Cyclic "day" index (period 7) and "time slot" index (period 96).
day_number_list = [i%7 for i in range(3360)]
encoded_day_number_list = np.fft.fft(day_number_list)
time_number_list = [i%96 for i in range(3360)]
encoded_time_number_list = np.fft.fft(time_number_list)
# Six input features per sample: the (real, imag) pairs of the day, time and
# attach-rate encodings; the target is the attach-rate pair on its own.
final_list_x = np.array([[encoded_day_number_list.real[i],encoded_day_number_list.imag[i],encoded_time_number_list.real[i],encoded_time_number_list.imag[i],encoded_attach_rate_list.real[i],encoded_attach_rate_list.imag[i]] for i in range(3360)])
final_list_y = [ (encoded_attach_rate_list[i].real,encoded_attach_rate_list[i].imag) for i in range(len(encoded_attach_rate_list)) ]
pickle.dump(final_list_x,open('x_att.p','wb'))
pickle.dump(final_list_y,open('y_att.p','wb'))
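# Round-trip check (a sketch added for clarity; not part of the original
# script): the pickled features load back as a (3360, 6) matrix with one
# (real, imag) target pair per sample.
#
# x = pickle.load(open('x_att.p', 'rb'))
# y = pickle.load(open('y_att.p', 'rb'))
# assert x.shape == (3360, 6) and len(y) == 3360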
| apache-2.0 |
brentdax/swift | utils/gyb_syntax_support/ExprNodes.py | 5 | 19659 | from Child import Child
from Node import Node # noqa: I201
EXPR_NODES = [
# An inout expression.
# &x
Node('InOutExpr', kind='Expr',
children=[
Child('Ampersand', kind='PrefixAmpersandToken'),
Child('Expression', kind='Expr'),
]),
# A #column expression.
Node('PoundColumnExpr', kind='Expr',
children=[
Child('PoundColumn', kind='PoundColumnToken'),
]),
Node('FunctionCallArgumentList', kind='SyntaxCollection',
element='FunctionCallArgument'),
Node('TupleElementList', kind='SyntaxCollection',
element='TupleElement'),
Node('ArrayElementList', kind='SyntaxCollection',
element='ArrayElement'),
Node('DictionaryElementList', kind='SyntaxCollection',
element='DictionaryElement'),
Node('StringInterpolationSegments', kind='SyntaxCollection',
element='Syntax', element_name='Segment',
element_choices=['StringSegment', 'ExpressionSegment']),
# The try operator.
# try foo()
# try? foo()
# try! foo()
Node('TryExpr', kind='Expr',
children=[
Child('TryKeyword', kind='TryToken'),
Child('QuestionOrExclamationMark', kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child('Expression', kind='Expr'),
]),
# declname-arguments -> '(' declname-argument-list ')'
# declname-argument-list -> declname-argument*
# declname-argument -> identifier ':'
Node('DeclNameArgument', kind='Syntax',
children=[
Child('Name', kind='Token'),
Child('Colon', kind='ColonToken'),
]),
Node('DeclNameArgumentList', kind='SyntaxCollection',
element='DeclNameArgument'),
Node('DeclNameArguments', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='DeclNameArgumentList'),
Child('RightParen', kind='RightParenToken'),
]),
# An identifier expression.
Node('IdentifierExpr', kind='Expr',
children=[
Child('Identifier', kind='Token',
token_choices=[
'IdentifierToken',
'SelfToken',
'CapitalSelfToken',
'DollarIdentifierToken',
'SpacedBinaryOperatorToken',
]),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# A 'super' expression.
Node('SuperRefExpr', kind='Expr',
children=[
Child('SuperKeyword', kind='SuperToken'),
]),
# A nil expression.
Node('NilLiteralExpr', kind='Expr',
children=[
Child('NilKeyword', kind='NilToken'),
]),
# A _ expression.
Node('DiscardAssignmentExpr', kind='Expr',
children=[
Child('Wildcard', kind='WildcardToken'),
]),
# An = expression.
Node('AssignmentExpr', kind='Expr',
children=[
Child('AssignToken', kind='EqualToken'),
]),
# A flat list of expressions before sequence folding, e.g. 1 + 2 + 3.
Node('SequenceExpr', kind='Expr',
children=[
Child('Elements', kind='ExprList'),
]),
Node('ExprList', kind='SyntaxCollection',
element='Expr',
element_name='Expression'),
# A #line expression.
Node('PoundLineExpr', kind='Expr',
children=[
Child('PoundLine', kind='PoundLineToken'),
]),
# A #file expression.
Node('PoundFileExpr', kind='Expr',
children=[
Child('PoundFile', kind='PoundFileToken'),
]),
# A #function expression.
Node('PoundFunctionExpr', kind='Expr',
children=[
Child('PoundFunction', kind='PoundFunctionToken'),
]),
# A #dsohandle expression.
Node('PoundDsohandleExpr', kind='Expr',
children=[
Child('PoundDsohandle', kind='PoundDsohandleToken'),
]),
# symbolic-reference-expression -> identifier generic-argument-clause?
Node('SymbolicReferenceExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# A prefix operator expression.
# -x
# !true
Node('PrefixOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='PrefixOperatorToken',
is_optional=True),
Child('PostfixExpression', kind='Expr'),
]),
# An operator like + or -.
# NOTE: This appears only in SequenceExpr.
Node('BinaryOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='BinaryOperatorToken'),
]),
# arrow-expr -> 'throws'? '->'
# NOTE: This appears only in SequenceExpr.
Node('ArrowExpr', kind='Expr',
children=[
Child('ThrowsToken', kind='ThrowsToken',
is_optional=True),
Child('ArrowToken', kind='ArrowToken'),
]),
# A floating-point literal
# 4.0
# -3.9
# +4e20
Node('FloatLiteralExpr', kind='Expr',
children=[
Child('FloatingDigits', kind='FloatingLiteralToken'),
]),
Node('TupleExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('LeftParen', kind='LeftParenToken'),
Child('ElementList', kind='TupleElementList'),
Child('RightParen', kind='RightParenToken'),
]),
# Array literal, e.g. [1, 2, 3]
Node('ArrayExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Elements', kind='ArrayElementList'),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# Dictionary literal, e.g. [1:1, 2:2, 3:3]
Node('DictionaryExpr', kind='Expr',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Content', kind='Syntax',
node_choices=[
Child('Colon', kind='ColonToken'),
Child('Elements', kind='DictionaryElementList'),
]),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
# .foo
Node('ImplicitMemberExpr', kind='Expr',
children=[
Child("Dot", kind='PrefixPeriodToken'),
Child("Name", kind='Token'),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# function-call-argument -> label? ':'? expression ','?
Node('FunctionCallArgument', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# An element inside a tuple element list
Node('TupleElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Label', kind='IdentifierToken',
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# element inside an array expression: expression ','?
Node('ArrayElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# element inside a dictionary expression: key ':' value ','?
Node('DictionaryElement', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('KeyExpression', kind='Expr'),
Child('Colon', kind='ColonToken'),
Child('ValueExpression', kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# An integer literal.
# 3
# +3_400
# +0x4f
Node('IntegerLiteralExpr', kind='Expr',
children=[
Child('Digits', kind='IntegerLiteralToken'),
]),
Node('StringLiteralExpr', kind='Expr',
children=[
Child("StringLiteral", kind='StringLiteralToken')
]),
# true or false
Node('BooleanLiteralExpr', kind='Expr',
children=[
Child("BooleanLiteral", kind='Token',
token_choices=[
'TrueToken',
'FalseToken',
])
]),
# a ? 1 : 0
Node('TernaryExpr', kind='Expr',
children=[
Child("ConditionExpression", kind='Expr'),
Child("QuestionMark", kind='InfixQuestionMarkToken'),
Child("FirstChoice", kind='Expr'),
Child("ColonMark", kind='ColonToken'),
Child("SecondChoice", kind='Expr')
]),
# a.b
Node('MemberAccessExpr', kind='Expr',
children=[
# The base needs to be optional to parse expressions in key paths
# like \.a
Child("Base", kind='Expr', is_optional=True),
Child("Dot", kind='PeriodToken'),
Child("Name", kind='Token'),
Child('DeclNameArguments', kind='DeclNameArguments',
is_optional=True),
]),
# dot-self-expr -> expr '.' 'self'
Node('DotSelfExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('Dot', kind='Token',
token_choices=[
'PeriodToken', 'PrefixPeriodToken'
]),
Child('SelfKeyword', kind='SelfToken'),
]),
# is TypeName
Node('IsExpr', kind='Expr',
children=[
Child("IsTok", kind='IsToken'),
Child("TypeName", kind='Type')
]),
# as TypeName
Node('AsExpr', kind='Expr',
children=[
Child("AsTok", kind='AsToken'),
Child("QuestionOrExclamationMark", kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
Child("TypeName", kind='Type')
]),
# Type
Node('TypeExpr', kind='Expr',
children=[
Child('Type', kind='Type'),
]),
Node('ClosureCaptureItem', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child("Specifier", kind='TokenList', is_optional=True),
Child("Name", kind='IdentifierToken', is_optional=True),
Child('AssignToken', kind='EqualToken', is_optional=True),
Child("Expression", kind='Expr'),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
Node('ClosureCaptureItemList', kind='SyntaxCollection',
element='ClosureCaptureItem'),
Node('ClosureCaptureSignature', kind='Syntax',
children=[
Child('LeftSquare', kind='LeftSquareBracketToken'),
Child('Items', kind='ClosureCaptureItemList', is_optional=True),
Child('RightSquare', kind='RightSquareBracketToken'),
]),
Node('ClosureParam', kind='Syntax',
traits=['WithTrailingComma'],
children=[
Child('Name', kind='Token',
token_choices=[
'IdentifierToken',
'WildcardToken',
]),
Child('TrailingComma', kind='CommaToken', is_optional=True),
]),
# a, b, c
Node('ClosureParamList', kind='SyntaxCollection', element='ClosureParam'),
Node('ClosureSignature', kind='Syntax',
children=[
Child('Capture', kind='ClosureCaptureSignature',
is_optional=True),
Child('Input', kind='Syntax', is_optional=True,
node_choices=[
Child('SimpleInput', kind='ClosureParamList'),
Child('Input', kind='ParameterClause'),
]),
Child('ThrowsTok', kind='ThrowsToken', is_optional=True),
Child('Output', kind='ReturnClause', is_optional=True),
Child('InTok', kind='InToken'),
]),
Node('ClosureExpr', kind='Expr',
traits=['Braced', 'WithStatements'],
children=[
Child('LeftBrace', kind='LeftBraceToken'),
Child('Signature', kind='ClosureSignature', is_optional=True),
Child('Statements', kind='CodeBlockItemList'),
Child('RightBrace', kind='RightBraceToken'),
]),
# unresolved-pattern-expr -> pattern
Node('UnresolvedPatternExpr', kind='Expr',
children=[
Child('Pattern', kind='Pattern'),
]),
# call-expr -> expr '(' call-argument-list ')' closure-expr?
# | expr closure-expr
Node('FunctionCallExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftParen', kind='LeftParenToken',
is_optional=True),
Child('ArgumentList', kind='FunctionCallArgumentList'),
Child('RightParen', kind='RightParenToken',
is_optional=True),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
]),
# subscript-expr -> expr '[' call-argument-list ']' closure-expr?
Node('SubscriptExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftBracket', kind='LeftSquareBracketToken'),
Child('ArgumentList', kind='FunctionCallArgumentList'),
Child('RightBracket', kind='RightSquareBracketToken'),
Child('TrailingClosure', kind='ClosureExpr',
is_optional=True),
]),
# optional-chaining-expr -> expr '?'
Node('OptionalChainingExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('QuestionMark', kind='PostfixQuestionMarkToken'),
]),
# forced-value-expr -> expr '!'
Node('ForcedValueExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('ExclamationMark', kind='ExclamationMarkToken'),
]),
# postfix-unary-expr -> expr postfix-operator
Node('PostfixUnaryExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('OperatorToken', kind='PostfixOperatorToken'),
]),
# specialize-expr -> expr generic-argument-clause?
Node('SpecializeExpr', kind='Expr',
children=[
Child('Expression', kind='Expr'),
Child('GenericArgumentClause', kind='GenericArgumentClause'),
]),
# string literal segment in a string interpolation expression.
Node('StringSegment', kind='Syntax',
children=[
Child('Content', kind='StringSegmentToken'),
]),
# expression segment in a string interpolation expression.
Node('ExpressionSegment', kind='Syntax',
traits=['Parenthesized'],
children=[
Child('Backslash', kind='BackslashToken'),
Child('LeftParen', kind='LeftParenToken',
classification='StringInterpolationAnchor',
force_classification=True),
Child('Expression', kind='Expr'),
Child('RightParen', kind='StringInterpolationAnchorToken'),
]),
# e.g. "abc \(foo()) def"
Node('StringInterpolationExpr', kind='Expr',
children=[
Child('OpenQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
Child('Segments', kind='StringInterpolationSegments'),
Child('CloseQuote', kind='Token',
token_choices=[
'StringQuoteToken',
'MultilineStringQuoteToken',
]),
]),
# e.g. "\a.b[2].a"
Node('KeyPathExpr', kind='Expr',
children=[
Child('Backslash', kind='BackslashToken'),
Child('RootExpr', kind='Expr', is_optional=True,
node_choices=[
Child('IdentifierExpr', kind='IdentifierExpr'),
Child('SpecializeExpr', kind='SpecializeExpr')
]),
Child('Expression', kind='Expr'),
]),
# The period in the key path serves as the base on which the
# right-hand-side of the key path is evaluated
Node('KeyPathBaseExpr', kind='Expr',
children=[
Child('Period', kind='PeriodToken'),
]),
# e.g. "a." or "a"
Node('ObjcNamePiece', kind='Syntax',
children=[
Child('Name', kind='IdentifierToken'),
Child('Dot', kind='PeriodToken', is_optional=True),
]),
# e.g. "a.b.c"
Node('ObjcName', kind='SyntaxCollection', element='ObjcNamePiece'),
# e.g. "#keyPath(a.b.c)"
Node('ObjcKeyPathExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('KeyPath', kind='PoundKeyPathToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Name', kind='ObjcName'),
Child('RightParen', kind='RightParenToken'),
]),
# e.g. "#selector(getter:Foo.bar)"
Node('ObjcSelectorExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('PoundSelector', kind='PoundSelectorToken'),
Child('LeftParen', kind='LeftParenToken'),
Child('Kind', kind='ContextualKeywordToken',
text_choices=['getter', 'setter'],
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Name', kind='Expr'),
Child('RightParen', kind='RightParenToken'),
]),
# <#content#>
Node('EditorPlaceholderExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
]),
# #fileLiteral(a, b, c)
Node('ObjectLiteralExpr', kind='Expr',
traits=['Parenthesized'],
children=[
Child('Identifier', kind='Token',
token_choices=[
'PoundColorLiteralToken',
'PoundFileLiteralToken',
'PoundImageLiteralToken',
]),
Child('LeftParen', kind='LeftParenToken'),
Child('Arguments', kind='FunctionCallArgumentList'),
Child('RightParen', kind='RightParenToken'),
]),
]
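# Quick sanity pass over the table (a sketch, not part of the original file;
# it assumes Node exposes `children` and Child exposes `name`/`is_optional`
# as in this repo's Node.py and Child.py -- treat the attribute names as
# assumptions):
#
# for node in EXPR_NODES:
#     optional = [child.name for child in node.children if child.is_optional]
#     if optional:
#         print(optional)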
| apache-2.0 |
mythsmith/veusz | veusz/document/svg_export.py | 4 | 21041 | # Copyright (C) 2010 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A home-brewed SVG paint engine for doing svg with clipping
and exporting text as paths for WYSIWYG."""
from __future__ import division, print_function
import re
from ..compat import crange, cbytes
from .. import qtall as qt4
# dpi runs at many times usual, and results are scaled down
# helps fix point issues in font sizes
dpi = 900.
scale = 0.1
inch_mm = 25.4
inch_pt = 72.0
def printpath(path):
"""Debugging print path."""
print("Contents of", path)
for i in crange(path.elementCount()):
el = path.elementAt(i)
print(" ", el.type, el.x, el.y)
def fltStr(v, prec=2):
"""Change a float to a string, using a maximum number of decimal places
but removing trailing zeros."""
# ensures consistent rounding behaviour on different platforms
v = round(v, prec+2)
val = ('% 20.10f' % v)[:10+prec]
# drop any trailing zeros
val = val.rstrip('0').lstrip(' ').rstrip('.')
# get rid of -0s (platform differences here)
if val == '-0':
val = '0'
return val
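# Worked examples (added for clarity; each value follows from the code above):
# fltStr(2.0) -> '2' (trailing zeros and the decimal point are dropped)
# fltStr(3.14159) -> '3.14' (two decimal places by default)
# fltStr(-0.0001) -> '0' (the platform-dependent '-0' is normalised away)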
def escapeXML(text):
"""Escape special characters in XML."""
# we first swap & with an unused character, so we can replace it later
text = text.replace('&', u'\ue001')
text = text.replace('<', '&lt;')
text = text.replace('>', '&gt;')
text = text.replace('"', '&quot;')
text = text.replace("'", '&apos;')
text = text.replace(u'\ue001', '&amp;')
return text
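# Worked example (added for clarity, matching the mapping above):
# escapeXML('a<b & "c"') -> 'a&lt;b &amp; &quot;c&quot;'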
def createPath(path):
"""Convert qt path to svg path.
We use relative coordinates to make the file size smaller and help
compression
"""
p = []
count = path.elementCount()
i = 0
ox, oy = 0, 0
while i < count:
e = path.elementAt(i)
nx, ny = e.x*scale, e.y*scale
if e.type == qt4.QPainterPath.MoveToElement:
p.append( 'm%s,%s' % (fltStr(nx-ox), fltStr(ny-oy)) )
ox, oy = nx, ny
elif e.type == qt4.QPainterPath.LineToElement:
p.append( 'l%s,%s' % (fltStr(nx-ox), fltStr(ny-oy)) )
ox, oy = nx, ny
elif e.type == qt4.QPainterPath.CurveToElement:
e1 = path.elementAt(i+1)
e2 = path.elementAt(i+2)
p.append( 'c%s,%s,%s,%s,%s,%s' % (
fltStr(nx-ox), fltStr(ny-oy),
fltStr(e1.x*scale-ox), fltStr(e1.y*scale-oy),
fltStr(e2.x*scale-ox), fltStr(e2.y*scale-oy)) )
ox, oy = e2.x*scale, e2.y*scale
i += 2
else:
assert False
i += 1
return ''.join(p)
class SVGElement(object):
"""SVG element in output.
This represents the XML tree in memory
"""
def __init__(self, parent, eltype, attrb, text=None):
"""Intialise element.
parent: parent element or None
eltype: type (e.g. 'polyline')
attrb: attribute string appended to output
text: text to output between this and closing element.
"""
self.eltype = eltype
self.attrb = attrb
self.children = []
self.parent = parent
self.text = text
if parent:
parent.children.append(self)
def write(self, fileobj):
"""Write element and its children to the output file."""
fileobj.write('<%s' % self.eltype)
if self.attrb:
fileobj.write(' ' + self.attrb)
if self.text:
fileobj.write('>%s</%s>\n' % (self.text, self.eltype))
elif self.children:
fileobj.write('>\n')
for c in self.children:
c.write(fileobj)
fileobj.write('</%s>\n' % self.eltype)
else:
# simple close tag if not children or text
fileobj.write('/>\n')
class SVGPaintEngine(qt4.QPaintEngine):
"""Paint engine class for writing to svg files."""
def __init__(self, width_in, height_in, writetextastext=False):
"""Create the class, using width and height as size of canvas
in inches."""
qt4.QPaintEngine.__init__(self,
qt4.QPaintEngine.Antialiasing |
qt4.QPaintEngine.PainterPaths |
qt4.QPaintEngine.PrimitiveTransform |
qt4.QPaintEngine.PaintOutsidePaintEvent |
qt4.QPaintEngine.PixmapTransform |
qt4.QPaintEngine.AlphaBlend
)
self.width = width_in
self.height = height_in
self.imageformat = 'png'
self.writetextastext = writetextastext
def begin(self, paintdevice):
"""Start painting."""
self.device = paintdevice
self.pen = qt4.QPen()
self.brush = qt4.QBrush()
self.clippath = None
self.clipnum = 0
self.existingclips = {}
self.transform = qt4.QTransform()
# svg root element for qt defaults
self.rootelement = SVGElement(
None, 'svg',
('width="%spx" height="%spx" version="1.1"\n'
' xmlns="http://www.w3.org/2000/svg"\n'
' xmlns:xlink="http://www.w3.org/1999/xlink"') %
(fltStr(self.width*dpi*scale), fltStr(self.height*dpi*scale)))
SVGElement(self.rootelement, 'desc', '', 'Veusz output document')
# definitions, for clips, etc.
self.defs = SVGElement(self.rootelement, 'defs', '')
# this is where all the drawing goes
self.celement = SVGElement(
self.rootelement, 'g',
'stroke-linejoin="bevel" stroke-linecap="square" '
'stroke="#000000" fill-rule="evenodd"')
# previous transform, stroke and clip states
self.oldstate = [None, None, None]
# cache paths to avoid duplication
self.pathcache = {}
self.pathcacheidx = 0
return True
def pruneEmptyGroups(self):
"""Take the element tree and remove any empty group entries."""
def recursive(root):
children = list(root.children)
# remove any empty children first
for c in children:
recursive(c)
if root.eltype == 'g' and len(root.children) == 0:
# safe to remove
index = root.parent.children.index(root)
del root.parent.children[index]
# merge equal groups
last = None
i = 0
while i < len(root.children):
this = root.children[i]
if ( last is not None and
last.eltype == this.eltype and last.attrb == this.attrb
and last.text == this.text ):
last.children += this.children
del root.children[i]
else:
last = this
i += 1
recursive(self.rootelement)
def end(self):
self.pruneEmptyGroups()
fileobj = self.device.fileobj
fileobj.write('<?xml version="1.0" standalone="no"?>\n'
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"\n'
' "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
# write all the elements
self.rootelement.write(fileobj)
return True
def _updateClipPath(self, clippath, clipoperation):
"""Update clip path given state change."""
clippath = self.transform.map(clippath)
if clipoperation == qt4.Qt.NoClip:
self.clippath = None
elif clipoperation == qt4.Qt.ReplaceClip:
self.clippath = clippath
elif clipoperation == qt4.Qt.IntersectClip:
self.clippath = self.clippath.intersected(clippath)
elif clipoperation == qt4.Qt.UniteClip:
self.clippath = self.clippath.united(clippath)
else:
assert False
def updateState(self, state):
"""Examine what has changed in state and call apropriate function."""
ss = state.state()
# state is a list of transform, stroke/fill and clip states
statevec = list(self.oldstate)
if ss & qt4.QPaintEngine.DirtyTransform:
self.transform = state.transform()
statevec[0] = self.transformState()
if ss & qt4.QPaintEngine.DirtyPen:
self.pen = state.pen()
statevec[1] = self.strokeFillState()
if ss & qt4.QPaintEngine.DirtyBrush:
self.brush = state.brush()
statevec[1] = self.strokeFillState()
if ss & qt4.QPaintEngine.DirtyClipPath:
self._updateClipPath(state.clipPath(), state.clipOperation())
statevec[2] = self.clipState()
if ss & qt4.QPaintEngine.DirtyClipRegion:
path = qt4.QPainterPath()
path.addRegion(state.clipRegion())
self._updateClipPath(path, state.clipOperation())
statevec[2] = self.clipState()
# work out which state differs first
pop = 0
for i in crange(2, -1, -1):
if statevec[i] != self.oldstate[i]:
pop = i+1
break
# go back up the tree the required number of times
for i in crange(pop):
if self.oldstate[i]:
self.celement = self.celement.parent
# create new elements for changed states
for i in crange(pop-1, -1, -1):
if statevec[i]:
self.celement = SVGElement(
self.celement, 'g', ' '.join(statevec[i]))
self.oldstate = statevec
def clipState(self):
"""Get SVG clipping state. This is in the form of an svg group"""
if self.clippath is None:
return ()
path = createPath(self.clippath)
if path in self.existingclips:
url = 'url(#c%i)' % self.existingclips[path]
else:
clippath = SVGElement(self.defs, 'clipPath',
'id="c%i"' % self.clipnum)
SVGElement(clippath, 'path', 'd="%s"' % path)
url = 'url(#c%i)' % self.clipnum
self.existingclips[path] = self.clipnum
self.clipnum += 1
return ('clip-path="%s"' % url,)
def strokeFillState(self):
"""Return stroke-fill state."""
vals = {}
p = self.pen
# - color
color = p.color().name()
if color != '#000000':
vals['stroke'] = p.color().name()
# - opacity
if p.color().alphaF() != 1.:
vals['stroke-opacity'] = '%.3g' % p.color().alphaF()
# - join style
if p.joinStyle() != qt4.Qt.BevelJoin:
vals['stroke-linejoin'] = {
qt4.Qt.MiterJoin: 'miter',
qt4.Qt.SvgMiterJoin: 'miter',
qt4.Qt.RoundJoin: 'round',
qt4.Qt.BevelJoin: 'bevel'
}[p.joinStyle()]
# - cap style
if p.capStyle() != qt4.Qt.SquareCap:
vals['stroke-linecap'] = {
qt4.Qt.FlatCap: 'butt',
qt4.Qt.SquareCap: 'square',
qt4.Qt.RoundCap: 'round'
}[p.capStyle()]
# - width
w = p.widthF()
# width 0 is device width for qt
if w == 0.:
w = 1./scale
vals['stroke-width'] = fltStr(w*scale)
# - line style
if p.style() == qt4.Qt.NoPen:
vals['stroke'] = 'none'
elif p.style() not in (qt4.Qt.SolidLine, qt4.Qt.NoPen):
# convert from pen width fractions to pts
nums = [fltStr(scale*w*x) for x in p.dashPattern()]
vals['stroke-dasharray'] = ','.join(nums)
# BRUSH STYLES
b = self.brush
if b.style() == qt4.Qt.NoBrush:
vals['fill'] = 'none'
else:
vals['fill'] = b.color().name()
if b.color().alphaF() != 1.0:
vals['fill-opacity'] = '%.3g' % b.color().alphaF()
items = ['%s="%s"' % x for x in sorted(vals.items())]
return tuple(items)
def transformState(self):
if not self.transform.isIdentity():
m = self.transform
dx, dy = m.dx(), m.dy()
if (m.m11(), m.m12(), m.m21(), m.m22()) == (1., 0., 0., 1):
out = ('transform="translate(%s,%s)"' % (
fltStr(dx*scale), fltStr(dy*scale)) ,)
else:
out = ('transform="matrix(%s %s %s %s %s %s)"' % (
fltStr(m.m11(), 4), fltStr(m.m12(), 4),
fltStr(m.m21(), 4), fltStr(m.m22(), 4),
fltStr(dx*scale), fltStr(dy*scale) ),)
else:
out = ()
return out
def drawPath(self, path):
"""Draw a path on the output."""
p = createPath(path)
attrb = 'd="%s"' % p
if path.fillRule() == qt4.Qt.WindingFill:
attrb += ' fill-rule="nonzero"'
if attrb in self.pathcache:
element, num = self.pathcache[attrb]
if num is None:
# this is the first time an element has been referenced again
# assign it an id for use below
num = self.pathcacheidx
self.pathcacheidx += 1
self.pathcache[attrb] = element, num
# add an id attribute
element.attrb += ' id="p%i"' % num
# if the parent is a translation, swallow this into the use element
m = re.match('transform="translate\(([-0-9.]+),([-0-9.]+)\)"',
self.celement.attrb)
if m:
SVGElement(self.celement.parent, 'use',
'xlink:href="#p%i" x="%s" y="%s"' % (
num, m.group(1), m.group(2)))
else:
SVGElement(self.celement, 'use', 'xlink:href="#p%i"' % num)
else:
pathel = SVGElement(self.celement, 'path', attrb)
self.pathcache[attrb] = [pathel, None]
def drawTextItem(self, pt, textitem):
"""Convert text to a path and draw it.
"""
if self.writetextastext:
# size
f = textitem.font()
if f.pixelSize() > 0:
size = f.pixelSize()*scale
else:
size = f.pointSizeF()*scale*dpi/inch_pt
font = textitem.font()
grpattrb = [
'stroke="none"',
'fill="%s"' % self.pen.color().name(),
'fill-opacity="%.3g"' % self.pen.color().alphaF(),
'font-family="%s"' % escapeXML(font.family()),
'font-size="%s"' % size,
]
if font.italic():
grpattrb.append('font-style="italic"')
if font.bold():
grpattrb.append('font-weight="bold"')
grp = SVGElement(
self.celement, 'g',
' '.join(grpattrb) )
text = escapeXML( textitem.text() )
textattrb = [
'x="%s"' % fltStr(pt.x()*scale),
'y="%s"' % fltStr(pt.y()*scale),
'textLength="%s"' % fltStr(textitem.width()*scale),
]
# spaces get lost without this
if text.find(' ') >= 0 or text[:1] == ' ' or text[-1:] == ' ':
textattrb.append('xml:space="preserve"')
# write as an SVG text element
SVGElement(
grp, 'text',
' '.join(textattrb),
text=text )
else:
# convert to a path
path = qt4.QPainterPath()
path.addText(pt, textitem.font(), textitem.text())
p = createPath(path)
SVGElement(
self.celement, 'path',
'd="%s" fill="%s" stroke="none" fill-opacity="%.3g"' % (
p, self.pen.color().name(), self.pen.color().alphaF()) )
def drawLines(self, lines):
"""Draw multiple lines."""
paths = []
for line in lines:
path = 'M%s,%sl%s,%s' % (
fltStr(line.x1()*scale), fltStr(line.y1()*scale),
fltStr((line.x2()-line.x1())*scale),
fltStr((line.y2()-line.y1())*scale))
paths.append(path)
SVGElement(self.celement, 'path', 'd="%s"' % ''.join(paths))
def drawPolygon(self, points, mode):
"""Draw polygon on output."""
pts = []
for p in points:
pts.append( '%s,%s' % (fltStr(p.x()*scale), fltStr(p.y()*scale)) )
if mode == qt4.QPaintEngine.PolylineMode:
SVGElement(self.celement, 'polyline',
'fill="none" points="%s"' % ' '.join(pts))
else:
attrb = 'points="%s"' % ' '.join(pts)
if mode == qt4.Qt.WindingFill:
attrb += ' fill-rule="nonzero"'
SVGElement(self.celement, 'polygon', attrb)
def drawEllipse(self, rect):
"""Draw an ellipse to the svg file."""
SVGElement(self.celement, 'ellipse',
'cx="%s" cy="%s" rx="%s" ry="%s"' %
(fltStr(rect.center().x()*scale),
fltStr(rect.center().y()*scale),
fltStr(rect.width()*0.5*scale),
fltStr(rect.height()*0.5*scale)))
def drawPoints(self, points):
"""Draw points."""
for pt in points:
x, y = fltStr(pt.x()*scale), fltStr(pt.y()*scale)
SVGElement(self.celement, 'line',
('x1="%s" y1="%s" x2="%s" y2="%s" '
'stroke-linecap="round"') % (x, y, x, y))
def drawImage(self, r, img, sr, flags):
"""Draw image.
As the pixmap method uses the same code, just call this."""
self.drawPixmap(r, img, sr)
def drawPixmap(self, r, pixmap, sr):
"""Draw pixmap svg item.
This is converted to a bitmap and embedded in the output
"""
# convert pixmap to textual data
data = qt4.QByteArray()
buf = qt4.QBuffer(data)
buf.open(qt4.QBuffer.ReadWrite)
pixmap.save(buf, self.imageformat.upper(), 0)
buf.close()
attrb = [ 'x="%s" y="%s" ' % (fltStr(r.x()*scale), fltStr(r.y()*scale)),
'width="%s" ' % fltStr(r.width()*scale),
'height="%s" ' % fltStr(r.height()*scale),
'xlink:href="data:image/%s;base64,' % self.imageformat,
cbytes(data.toBase64()).decode('ascii'),
'" preserveAspectRatio="none"' ]
SVGElement(self.celement, 'image', ''.join(attrb))
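# The emitted element then looks like this (an illustration, not captured
# output; 'iVBORw0...' is merely how base64-encoded PNG data begins):
# <image x="0" y="0" width="10" height="10"
# xlink:href="data:image/png;base64,iVBORw0..." preserveAspectRatio="none"/>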
def type(self):
"""A random number for the engine."""
return qt4.QPaintEngine.User + 11
class SVGPaintDevice(qt4.QPaintDevice):
"""Paint device for SVG paint engine."""
def __init__(self, fileobj, width_in, height_in,
writetextastext=False):
qt4.QPaintDevice.__init__(self)
self.engine = SVGPaintEngine(width_in, height_in,
writetextastext=writetextastext)
self.fileobj = fileobj
def paintEngine(self):
return self.engine
def metric(self, m):
"""Return the metrics of the painter."""
if m == qt4.QPaintDevice.PdmWidth:
return int(self.engine.width * dpi)
elif m == qt4.QPaintDevice.PdmHeight:
return int(self.engine.height * dpi)
elif m == qt4.QPaintDevice.PdmWidthMM:
return int(self.engine.width * inch_mm)
elif m == qt4.QPaintDevice.PdmHeightMM:
return int(self.engine.height * inch_mm)
elif m == qt4.QPaintDevice.PdmNumColors:
return 2147483647
elif m == qt4.QPaintDevice.PdmDepth:
return 24
elif m == qt4.QPaintDevice.PdmDpiX:
return int(dpi)
elif m == qt4.QPaintDevice.PdmDpiY:
return int(dpi)
elif m == qt4.QPaintDevice.PdmPhysicalDpiX:
return int(dpi)
elif m == qt4.QPaintDevice.PdmPhysicalDpiY:
return int(dpi)
| gpl-2.0 |
alianmohammad/pd-gem5 | util/on-chip-network-power-area.py | 49 | 8472 | # Copyright (c) 2014 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ConfigParser import ConfigParser
import string, sys, subprocess, os
# Compile DSENT to generate the Python module and then import it.
# This script assumes it is executed from the gem5 root.
print("Attempting compilation")
from subprocess import call
src_dir = 'ext/dsent'
build_dir = 'build/ext/dsent'
if not os.path.exists(build_dir):
os.makedirs(build_dir)
os.chdir(build_dir)
error = call(['cmake', '../../../%s' % src_dir])
if error:
print("Failed to run cmake")
exit(-1)
error = call(['make'])
if error:
print("Failed to run make")
exit(-1)
print("Compiled dsent")
os.chdir("../../../")
sys.path.append("build/ext/dsent")
import dsent
# Parse gem5 config.ini file for the configuration parameters related to
# the on-chip network.
def parseConfig(config_file):
config = ConfigParser()
if not config.read(config_file):
print("ERROR: config file '", config_file, "' not found")
sys.exit(1)
if not config.has_section("system.ruby.network"):
print("ERROR: Ruby network not found in '", config_file)
sys.exit(1)
if config.get("system.ruby.network", "type") != "GarnetNetwork_d" :
print("ERROR: Garnet network not used in '", config_file)
sys.exit(1)
number_of_virtual_networks = config.getint("system.ruby.network",
"number_of_virtual_networks")
vcs_per_vnet = config.getint("system.ruby.network", "vcs_per_vnet")
buffers_per_data_vc = config.getint("system.ruby.network",
"buffers_per_data_vc")
buffers_per_control_vc = config.getint("system.ruby.network",
"buffers_per_ctrl_vc")
ni_flit_size_bits = 8 * config.getint("system.ruby.network",
"ni_flit_size")
routers = config.get("system.ruby.network", "routers").split()
int_links = config.get("system.ruby.network", "int_links").split()
ext_links = config.get("system.ruby.network", "ext_links").split()
return (config, number_of_virtual_networks, vcs_per_vnet,
buffers_per_data_vc, buffers_per_control_vc, ni_flit_size_bits,
routers, int_links, ext_links)
def getClock(obj, config):
if config.get(obj, "type") == "SrcClockDomain":
return config.getint(obj, "clock")
if config.get(obj, "type") == "DerivedClockDomain":
source = config.get(obj, "clk_domain")
divider = config.getint(obj, "clk_divider")
return getClock(source, config) / divider
source = config.get(obj, "clk_domain")
return getClock(source, config)
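# Illustration with hypothetical config.ini entries (not from a real run):
# [system.clk_domain]
# type=SrcClockDomain
# clock=1000
# [system.ruby.clk_domain]
# type=DerivedClockDomain
# clk_domain=system.clk_domain
# clk_divider=2
# getClock("system.ruby.clk_domain", config) follows the derived domain to
# its source and returns 1000 / 2 = 500.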
## Compute the power consumed by the given router
def computeRouterPowerAndArea(router, stats_file, config, int_links, ext_links,
number_of_virtual_networks, vcs_per_vnet,
buffers_per_data_vc, buffers_per_control_vc,
ni_flit_size_bits):
frequency = getClock(router, config)
num_ports = 0
for int_link in int_links:
if config.get(int_link, "node_a") == router or \
config.get(int_link, "node_b") == router:
num_ports += 1
for ext_link in ext_links:
if config.get(ext_link, "int_node") == router:
num_ports += 1
power = dsent.computeRouterPowerAndArea(frequency, num_ports, num_ports,
number_of_virtual_networks,
vcs_per_vnet, buffers_per_data_vc,
ni_flit_size_bits)
print("%s Power: " % router, power)
## Compute the power consumed by the given link
def computeLinkPower(link, stats_file, config, sim_seconds):
frequency = getClock(link + ".nls0", config)
power = dsent.computeLinkPower(frequency)
print("%s.nls0 Power: " % link, power)
frequency = getClock(link + ".nls1", config)
power = dsent.computeLinkPower(frequency)
print("%s.nls1 Power: " % link, power)
def parseStats(stats_file, config, router_config_file, link_config_file,
routers, int_links, ext_links, number_of_virtual_networks,
vcs_per_vnet, buffers_per_data_vc, buffers_per_control_vc,
ni_flit_size_bits):
# Open the stats.txt file and parse it to for the required numbers
# and the number of routers.
try:
stats_handle = open(stats_file, 'r')
stats_handle.close()
except IOError:
print("Failed to open ", stats_file, " for reading")
exit(-1)
# Now parse the stats
pattern = "sim_seconds"
lines = string.split(subprocess.check_output(
["grep", pattern, stats_file]), '\n', -1)
assert len(lines) >= 1
## Assume that the first line is the one required
[l1,l2,l3] = lines[0].partition(" ")
l4 = l3.strip().partition(" ")
simulation_length_in_seconds = float(l4[0])
# Initialize DSENT with a configuration file
dsent.initialize(router_config_file)
# Compute the power consumed by the routers
for router in routers:
computeRouterPowerAndArea(router, stats_file, config, int_links,
ext_links, number_of_virtual_networks,
vcs_per_vnet, buffers_per_data_vc,
buffers_per_control_vc, ni_flit_size_bits)
# Finalize DSENT
dsent.finalize()
# Initialize DSENT with a configuration file
dsent.initialize(link_config_file)
# Compute the power consumed by the links
for link in int_links:
computeLinkPower(link, stats_file, config,
simulation_length_in_seconds)
for link in ext_links:
computeLinkPower(link, stats_file, config,
simulation_length_in_seconds)
# Finalize DSENT
dsent.finalize()
# This script parses the config.ini and the stats.txt from a run and
# generates the power and the area of the on-chip network using DSENT
def main():
if len(sys.argv) != 5:
print("Usage: ", sys.argv[0], " <gem5 root directory> " \
"<simulation directory> <router config file> <link config file>")
exit(-1)
print("WARNING: configuration files for DSENT and McPAT are separate. " \
"Changes made to one are not reflected in the other.")
(config, number_of_virtual_networks, vcs_per_vnet, buffers_per_data_vc,
buffers_per_control_vc, ni_flit_size_bits, routers, int_links,
ext_links) = parseConfig("%s/%s/config.ini" % (sys.argv[1], sys.argv[2]))
parseStats("%s/%s/stats.txt" % (sys.argv[1], sys.argv[2]), config,
sys.argv[3], sys.argv[4], routers, int_links, ext_links,
number_of_virtual_networks, vcs_per_vnet, buffers_per_data_vc,
buffers_per_control_vc, ni_flit_size_bits)
if __name__ == "__main__":
main()
| bsd-3-clause |
keyurpatel076/MissionPlannerGit | Lib/tarfile.py | 40 | 88997 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel (lars@gustaebel.de)"
__date__ = "$Date$"
__cvsid__ = "$Id$"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
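# Illustration (added for clarity; not in the original module): nti() and
# itn() are inverses for values that fit the plain octal encoding.
# >>> itn(1000)
# '0001750\x00'
# >>> nti(itn(1000))
# 1000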
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ""
self.pos = 0L
self.closed = False
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32("") & 0xffffffffL
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = ""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", long(time.time()))
self.__write("\037\213\010\010%s\002\377" % timestamp)
if self.name.endswith(".gz"):
self.name = self.name[:-3]
self.__write(self.name + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = ""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = ""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != "\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != "\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = "".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = "".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
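# A construction sketch (added for clarity; not part of the module): TarFile
# builds _Stream objects internally for the "r|*", "w|gz", ... modes. Built
# by hand it would look something like
# _Stream(name="archive.tar", mode="w", comptype="gz",
#         fileobj=some_socket_like_object, bufsize=RECORDSIZE)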
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith("\037\213\010"):
return "gz"
if self.buf.startswith("BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = ""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
b = [self.buf]
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
b.append(data)
x += len(data)
self.buf = "".join(b)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
getattr(tarinfo, "sparse", None))
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = ""
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = ""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if "\n" in self.buffer:
pos = self.buffer.find("\n") + 1
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if not buf or "\n" in buf:
self.buffer = "".join(buffers)
pos = self.buffer.find("\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = ""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", ""), 32),
stn(info.get("gname", ""), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
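# The length field counts its own digits, so iterate until the total
# stabilizes. Worked example: keyword "path" (4 bytes) and value "foo"
# (3 bytes) give l = 4 + 3 + 3 = 10; p=0 -> n=11, p=11 -> n=12,
# p=12 -> n=12, stop. The record "12 path=foo\n" is exactly 12 bytes.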
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
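# A minimal sketch (hypothetical, illustrative names only) of wiring a
# custom processor into a TarInfo subclass along the three rules above:
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == "X":               # imaginary member type
#                 return self._proc_mytype(tarfile)
#             return TarInfo._proc_member(self, tarfile)
#
#         def _proc_mytype(self, tarfile):
#             self.offset_data = tarfile.fileobj.tell()                   # rule 1
#             tarfile.offset = self.offset_data + self._block(self.size)  # rule 2
#             return self                                                 # rule 3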
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
buf = self.buf
sp = _ringbuffer()
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors=None, pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
if errors is not None:
self.errors = errors
elif mode == "r":
self.errors = "utf-8"
else:
self.errors = "strict"
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError, e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def _getposix(self):
return self.format == USTAR_FORMAT
def _setposix(self, value):
import warnings
warnings.warn("use the format attribute instead", DeprecationWarning,
2)
if value:
self.format = USTAR_FORMAT
else:
self.format = GNU_FORMAT
posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
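# A minimal sketch (hypothetical) of registering such a sub-constructor;
# "xzopen" is an invented example, not part of this module:
#
#     class MyTarFile(TarFile):
#         @classmethod
#         def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
#             # ... wrap fileobj in a suitable decompressor, then:
#             return cls.taropen(name, mode, fileobj, **kwargs)
#
#     MyTarFile.OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")
#
# After that, MyTarFile.open(name, "r:xz") dispatches to xzopen().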
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError), e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
_Stream(name, filemode, comptype, fileobj, bufsize),
**kwargs)
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
try:
t = cls.taropen(name, mode,
gzip.GzipFile(name, mode + "b", compresslevel, fileobj),
**kwargs)
except IOError:
raise ReadError("not a gzip file")
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
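# Worked example, assuming the usual RECORDSIZE of BLOCKSIZE * 20
# = 10240: an offset of 12288 gives divmod(12288, 10240) ->
# remainder 2048, so 8192 NUL bytes pad the archive to 20480.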
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0L
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
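# Sketch of the gettarinfo()/addfile() round trip described above
# ("data.txt" is a placeholder member name):
#
#     ti = tar.gettarinfo("data.txt")
#     ti.uname = ti.gname = "root"      # tweak attributes before adding
#     f = bltn_open("data.txt", "rb")
#     tar.addfile(ti, f)
#     f.close()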
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print filemode(tarinfo.mode),
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid),
if tarinfo.ischr() or tarinfo.isblk():
print "%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)),
else:
print "%10d" % tarinfo.size,
print "%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6],
print tarinfo.name + ("/" if tarinfo.isdir() else ""),
if verbose:
if tarinfo.issym():
print "->", tarinfo.linkname,
if tarinfo.islnk():
print "link to", tarinfo.linkname,
print
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
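# Sketch of restricting extraction to a member subset (paths are
# placeholders):
#
#     members = [m for m in tar.getmembers() if m.name.startswith("docs/")]
#     tar.extractall(path="/tmp/out", members=members)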
def extract(self, member, path=""):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'.
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
except EnvironmentError, e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0700)
except EnvironmentError, e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
target = bltn_open(targetpath, "wb")
copyfileobj(source, target)
source.close()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
if hasattr(os, "symlink") and hasattr(os, "link"):
# For systems that support symbolic and hard links.
if tarinfo.issym():
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
if os.path.lexists(targetpath):
os.unlink(targetpath)
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo), targetpath)
else:
try:
self._extract_member(self._find_link_target(tarinfo), targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
try:
g = grp.getgrgid(tarinfo.gid)[2]
except KeyError:
g = os.getgid()
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
try:
u = pwd.getpwuid(tarinfo.uid)[2]
except KeyError:
u = os.getuid()
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError, e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError, e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there are no more
members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError, e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError, e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print >> sys.stderr, msg
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
"""TarFile class compatible with standard module zipfile's
ZipFile class.
"""
def __init__(self, file, mode="r", compression=TAR_PLAIN):
from warnings import warnpy3k
warnpy3k("the TarFileCompat class has been removed in Python 3.0",
stacklevel=2)
if compression == TAR_PLAIN:
self.tarfile = TarFile.taropen(file, mode)
elif compression == TAR_GZIPPED:
self.tarfile = TarFile.gzopen(file, mode)
else:
raise ValueError("unknown compression constant")
if mode[0:1] == "r":
members = self.tarfile.getmembers()
for m in members:
m.filename = m.name
m.file_size = m.size
m.date_time = time.gmtime(m.mtime)[:6]
def namelist(self):
return map(lambda m: m.name, self.infolist())
def infolist(self):
return filter(lambda m: m.type in REGULAR_TYPES,
self.tarfile.getmembers())
def printdir(self):
self.tarfile.list()
def testzip(self):
return
def getinfo(self, name):
return self.tarfile.getmember(name)
def read(self, name):
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
def write(self, filename, arcname=None, compress_type=None):
self.tarfile.add(filename, arcname)
def writestr(self, zinfo, bytes):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import calendar
tinfo = TarInfo(zinfo.filename)
tinfo.size = len(bytes)
tinfo.mtime = calendar.timegm(zinfo.date_time)
self.tarfile.addfile(tinfo, StringIO(bytes))
def close(self):
self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
| gpl-3.0 |
nychitman1/android_kernel_lge_hammerhead | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
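# Example invocations:
#   perf script -s sctop.py 5            # all tasks, 5 second refresh
#   perf script -s sctop.py firefox      # only "firefox", default interval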
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
joequery/django | django/contrib/admin/templatetags/admin_list.py | 173 | 17480 | from __future__ import unicode_literals
import datetime
import warnings
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import NoReverseMatch
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.utils import formats
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{}</span> ', i + 1)
else:
return format_html('<a href="{}"{}>{}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages - 1 else ''),
i + 1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy pagination.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
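# Worked example: with 50 pages and page_num == 7 this yields
# [0, 1, '.', 4, 5, 6, 7, 8, 9, 10, '.', 48, 49]: ON_ENDS anchors
# at each end, ON_EACH_SIDE neighbours around the current page.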
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(
field_name, cl.model,
model_admin=cl.model_admin,
return_attr=True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
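# e.g. columns {2: 'asc', 3: 'desc'} serialize (joined with '.') to
# the ORDER_VAR value "2.-3"; a leading "-" marks descending order.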
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
}
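# Illustration (assumed state): if ORDER_VAR is currently '0.-2' (column 0
# ascending, column 2 descending), then for header i == 2 the yielded URLs
# encode o_list_toggle -> '0.2' (flip its direction), o_list_primary -> '2.0'
# (promote it to the primary sort) and o_list_remove -> '0' (drop it).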
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.svg' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % field_name]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if allow_tags:
warnings.warn(
"Deprecated allow_tags attribute used on field {}. "
"Use django.utils.safestring.format_html(), "
"format_html_join(), or mark_safe() instead.".format(field_name),
RemovedInDjango20Warning
)
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
if force_text(result_repr) == '':
result_repr = mark_safe('&nbsp;')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'th' if first else 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
link_or_text = format_html(
'<a href="{}"{}>{}</a>',
url,
format_html(
' onclick="opener.dismissRelatedLookupPopup(window, '
'&#39;{}&#39;); return false;"', result_id
) if cl.is_popup else '',
result_repr)
yield format_html('<{}{}>{}</{}>',
table_tag,
row_class,
link_or_text,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field(field_name)
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda filters: cl.get_query_string(filters, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'spec': spec,
})
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
| bsd-3-clause |
kalyptorisk/daversy | src/daversy/difflib_ext.py | 1 | 8527 | import re, difflib
def merge_group(list, func, start=True, end=True):
l, r, s = list[0]
first = ['',' class="first"'][start]
last = ['',' class="last"'][end]
if len(list) == 1:
if start and end:
return LINE_FORMAT % func(' class="first last"', l, r)
else:
return LINE_FORMAT % func(first+last, l, r)
html = LINE_FORMAT % func(first, l, r)
for i in range(1, len(list)-1):
l, r, s = list[i]
html += LINE_FORMAT % func('', l, r)
l, r, s = list[-1]
html += LINE_FORMAT % func(last, l, r)
return html
def make_table(table_id, header, fromlines, tolines, context=None, versions=['old', 'new']):
diff = list(difflib._mdiff(fromlines, tolines, context))
if not diff:
return None
same = lambda c, l, r: (c, l[0], r[0], 'l', format_line(l[1]))
add = lambda c, l, r: (c, '', r[0], 'r', format_line(r[1]))
sub = lambda c, l, r: (c, l[0], '', 'l', format_line(l[1]))
html = TABLE_HEADER % tuple([table_id, header] + versions)
for type, start, end in group_types(diff):
if type == 'same':
html += '<tbody>%s</tbody>\n' % \
merge_group(diff[start:end], same)
elif type == 'add':
html += '<tbody class="add">%s</tbody>\n' % \
merge_group(diff[start:end], add)
elif type == 'del':
html += '<tbody class="rem">%s</tbody>\n' % \
merge_group(diff[start:end], sub)
elif type == 'mod':
html += '<tbody class="mod">%s%s</tbody>\n' % \
(merge_group(diff[start:end], sub, end=False),
merge_group(diff[start:end], add, start=False))
elif type == 'skipped':
html += '<tbody class="skipped"><tr><th>...</th><th>...</th><td> </td></tr></tbody>\n'
html += TABLE_FOOTER
return html
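# Hedged usage sketch (added for illustration; the helper and its file-name
# arguments are hypothetical, not part of the original module):
def _demo_make_table(old_path, new_path):
    with open(old_path) as f:
        fromlines = f.readlines()
    with open(new_path) as f:
        tolines = f.readlines()
    # context=3 keeps three unchanged lines around each change; None shows all
    return make_table('diff0', 'example diff', fromlines, tolines, context=3)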
def get_type(left, right, status):
if not status:
if left or right:
return 'same'
else:
return 'skipped'
l_num, l_line = left
r_num, r_line = right
if l_num and not r_num:
return 'del'
elif r_num and not l_num:
return 'add'
else:
return 'mod'
def group_types(diff):
items = [get_type(l,r,s) for l,r,s in diff]
group = []
if not items:
print diff
start, current = 0, items[0]
for i in range(1, len(diff)):
if items[i] != current:
group.append( (current, start, i) )
current = items[i]
start = i
group.append( (current, start, len(diff)) )
return group
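# Illustration (assumed input): for a diff whose per-line types come out as
# ['same', 'add', 'add', 'mod'], group_types returns
# [('same', 0, 1), ('add', 1, 3), ('mod', 3, 4)] -- one (type, start, end)
# triple per contiguous run of the same type.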
REPLACE_CHARS = [
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
(' ', '&nbsp;'),
('"', '&quot;'),
('\0+', '<span class="ins">'),
('\0-', '<span class="del">'),
('\0^', '<span class="chg">'),
('\1', '</span>')
]
SINGLE_CHANGE = re.compile("^\0[\+\-\^]([^\0]+)\1\n?$")
def format_line(text):
text = text.replace('\n', '')
match = SINGLE_CHANGE.match(text)
if match:
text = match.group(1)
for src, replace in REPLACE_CHARS:
text = text.replace(src, replace)
return text
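# Illustration (assumed input): format_line('\x00+a < b\x01\n') unwraps the
# whole-line insert marker via SINGLE_CHANGE, then escapes, yielding
# 'a&nbsp;&lt;&nbsp;b'. Replacing '&' first matters: entities introduced by
# the later replacements would otherwise be escaped a second time.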
## the majority of the CSS and markup has been used from Trac
TABLE_HEADER = """
<li class='entry' id='%s'>
<h2>%s</h2>
<table class="inline" summary="Differences" cellspacing="0">
<colgroup><col class="lineno" /><col class="lineno" /><col class="content" /></colgroup>
<thead><th>%s</th><th>%s</th><th>&nbsp;</th></thead>
"""
TABLE_FOOTER = """
</table>
</li>
"""
LINE_FORMAT = "<tr%s><th>%s</th><th>%s</th><td class='%s'><span>%s</span>&nbsp;</td></tr>"
HTML_HEADER = """
<html><head><style type='text/css'>
/* Diff preferences */
#prefs fieldset { margin: 1em .5em .5em; padding: .5em 1em 0 }
/* Diff/change overview */
#overview {
line-height: 130%;
margin-top: 1em;
padding: .5em;
}
#overview dt {
font-weight: bold;
padding-right: .25em;
position: absolute;
left: 0;
text-align: right;
width: 7.75em;
}
#overview dd { margin-left: 8em }
/* Colors for change types */
#chglist .edit, #overview .mod, .diff #legend .mod { background: #fd8 }
#chglist .delete, #overview .rem, .diff #legend .rem { background: #f88 }
#chglist .add, #overview .add, .diff #legend .add { background: #bfb }
#chglist .copy, #overview .cp, .diff #legend .cp { background: #88f }
#chglist .move, #overview .mv, .diff #legend .mv { background: #ccc }
#chglist .unknown { background: #fff }
/* Legend for diff colors */
.diff #legend {
float: left;
font-size: 9px;
line-height: 1em;
margin: 1em 0;
padding: .5em;
}
.diff #legend h3 { display: none; }
.diff #legend dt {
background: #fff;
border: 1px solid #999;
float: left;
margin: .1em .5em .1em 2em;
overflow: hidden;
width: .8em; height: .8em;
}
.diff #legend dl, .diff #legend dd {
display: inline;
float: left;
padding: 0;
margin: 0;
margin-right: .5em;
}
/* Styles for the list of diffs */
.diff ul.entries { clear: both; margin: 0; padding: 0 }
.diff li.entry {
background: #f7f7f7;
border: 1px solid #d7d7d7;
list-style-type: none;
margin: 0 0 2em;
padding: 2px;
position: relative;
}
.diff h2 {
color: #333;
font-size: 14px;
letter-spacing: normal;
margin: 0 auto;
padding: .1em 0 .25em .5em;
}
/* Styles for the actual diff tables (side-by-side and inline) */
.diff table {
border: 1px solid #ddd;
border-spacing: 0;
border-top: 0;
empty-cells: show;
font-size: 12px;
line-height: 130%;
padding: 0;
margin: 0 auto;
width: 100%;
}
.diff table col.lineno { width: 4em }
.diff table th {
border-right: 1px solid #d7d7d7;
border-bottom: 1px solid #998;
font-size: 11px;
}
.diff table thead th {
background: #eee;
border-top: 1px solid #d7d7d7;
color: #999;
padding: 0 .25em;
text-align: center;
white-space: nowrap;
}
.diff table tbody th {
background: #eed;
color: #886;
font-weight: normal;
padding: 0 .5em;
text-align: right;
vertical-align: top;
}
.diff table tbody td {
background: #fff;
font: normal 11px monospace;
overflow: hidden;
padding: 1px 2px;
vertical-align: top;
}
.diff table tbody.skipped td {
background: #f7f7f7;
border: 1px solid #d7d7d7;
}
.diff table td span.del, .diff table td span.ins { text-decoration: none }
.diff table td span.del { color: #600 }
.diff table td span.ins { color: #060 }
/* Styles for the inline diff */
.diff table.inline tbody.mod td.l, .diff table.inline tbody.rem td.l {
background: #fdd;
border-color: #c00;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod td.r, .diff table.inline tbody.add td.r {
background: #dfd;
border-color: #0a0;
border-style: solid;
border-width: 0 1px 0 1px;
}
.diff table.inline tbody.mod tr.first td.l,
.diff table.inline tbody.rem tr.first td.l { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.l,
.diff table.inline tbody.rem tr.last td.l { border-bottom-width: 1px }
.diff table.inline tbody.mod tr.first td.r,
.diff table.inline tbody.add tr.first td.r { border-top-width: 1px }
.diff table.inline tbody.mod tr.last td.r,
.diff table.inline tbody.add tr.last td.r { border-bottom-width: 1px }
.diff table.inline tbody.mod td span.del { background: #e99; color: #000 }
.diff table.inline tbody.mod td span.ins { background: #9e9; color: #000 }
.diff table.inline tbody.mod td span.chg { background: #ee9; color: #000 }
/* Styles for the side-by-side diff */
.diff table.sidebyside colgroup.content { width: 50% }
.diff table.sidebyside tbody.mod td.l { background: #fe9 }
.diff table.sidebyside tbody.mod td.r { background: #fd8 }
.diff table.sidebyside tbody.add td.l { background: #dfd }
.diff table.sidebyside tbody.add td.r { background: #cfc }
.diff table.sidebyside tbody.rem td.l { background: #f88 }
.diff table.sidebyside tbody.rem td.r { background: #faa }
.diff table.sidebyside tbody.mod span.del, .diff table.sidebyside tbody.mod span.ins, .diff table.sidebyside tbody.mod span.chg {
background: #fc0;
}
/* Changeset overview */
#overview .files { padding-top: 2em }
#overview .files ul { margin: 0; padding: 0 }
#overview .files li { list-style-type: none }
#overview .files li .comment { display: none }
#overview .files li div {
border: 1px solid #999;
float: left;
margin: .2em .5em 0 0;
overflow: hidden;
width: .8em; height: .8em;
}
#overview div.add div, #overview div.cp div, #overview div.mv div {
border: 0;
margin: 0;
float: right;
width: .35em;
}
span.ver {font: normal 11px monospace;}
</style></head><body>
"""
HTML_FOOTER = """
</body>
</html>
"""
| gpl-2.0 |
sugarlabs/sugar-toolkit-gtk3 | src/sugar3/graphics/radiotoolbutton.py | 1 | 7756 | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2007-2008, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
'''
Provides a RadioToolButton class, similar to a "push" button.
A group of RadioToolButtons can be set, so that only one can be
selected at a time. When a button is clicked, it depresses and
is shaded darker.
It is also possible to set a tooltip to be displayed when the
user hovers over it with their cursor, as well as an accelerator
keyboard shortcut.
Example:
.. literalinclude:: ../examples/radiotoolbutton.py
'''
from gi.repository import Gtk
from gi.repository import GObject
from sugar3.graphics.icon import Icon
from sugar3.graphics.palette import Palette, ToolInvoker
from sugar3.graphics import toolbutton
class RadioToolButton(Gtk.RadioToolButton):
'''
The RadioToolButton class manages a Gtk.RadioToolButton styled for
Sugar.
Args:
icon_name (string): name of icon to be used.
Keyword Args:
accelerator (string): keyboard shortcut to be used to
activate this button.
tooltip (string): tooltip to be displayed when user hovers
over button.
xo_color (sugar3.graphics.xocolor.XoColor): XoColor of button.
hide_tooltip_on_click (bool): Whether or not the tooltip
is hidden when user clicks on button.
'''
__gtype_name__ = 'SugarRadioToolButton'
def __init__(self, icon_name=None, **kwargs):
self._accelerator = None
self._tooltip = None
self._xo_color = None
self._hide_tooltip_on_click = True
self._palette_invoker = ToolInvoker()
GObject.GObject.__init__(self, **kwargs)
self._palette_invoker.attach_tool(self)
if icon_name:
self.set_icon_name(icon_name)
# HACK: stop Gtk from adding a label and expanding the size of
# the button. This happens when set_icon_widget is called
# if label_widget is None
self.props.label_widget = Gtk.Box()
self.connect('destroy', self.__destroy_cb)
def __destroy_cb(self, icon):
if self._palette_invoker is not None:
self._palette_invoker.detach()
def set_tooltip(self, tooltip):
'''
Set the tooltip.
Args:
tooltip (string): tooltip to be set.
'''
if self.palette is None or self._tooltip is None:
self.palette = Palette(tooltip)
elif self.palette is not None:
self.palette.set_primary_text(tooltip)
self._tooltip = tooltip
# Set label, shows up when toolbar overflows
Gtk.RadioToolButton.set_label(self, tooltip)
def get_tooltip(self):
'''
Return the tooltip.
'''
return self._tooltip
tooltip = GObject.Property(type=str, setter=set_tooltip,
getter=get_tooltip)
def set_accelerator(self, accelerator):
'''
Set keyboard shortcut that activates this button.
Args:
accelerator (string): accelerator to be set. Should be in
form <modifier>Letter.
'''
self._accelerator = accelerator
toolbutton.setup_accelerator(self)
def get_accelerator(self):
'''
Return accelerator string.
'''
return self._accelerator
accelerator = GObject.Property(type=str, setter=set_accelerator,
getter=get_accelerator)
def set_icon_name(self, icon_name):
'''
Set name of icon.
Args:
icon_name (string): name of icon
'''
icon = Icon(icon_name=icon_name,
xo_color=self._xo_color)
self.set_icon_widget(icon)
icon.show()
def get_icon_name(self):
'''
Return icon name, or None if there is no icon name.
'''
if self.props.icon_widget is not None:
return self.props.icon_widget.props.icon_name
else:
return None
icon_name = GObject.Property(type=str, setter=set_icon_name,
getter=get_icon_name)
def set_xo_color(self, xo_color):
'''
Set XoColor of button icon.
Args:
xo_color (sugar3.graphics.xocolor.XoColor): xocolor to be set.
'''
if self._xo_color != xo_color:
self._xo_color = xo_color
if self.props.icon_widget is not None:
self.props.icon_widget.props.xo_color = xo_color
def get_xo_color(self):
'''
Return xocolor.
'''
return self._xo_color
xo_color = GObject.Property(type=object, setter=set_xo_color,
getter=get_xo_color)
def create_palette(self):
return None
def get_palette(self):
return self._palette_invoker.palette
def set_palette(self, palette):
self._palette_invoker.palette = palette
palette = GObject.Property(
type=object, setter=set_palette, getter=get_palette)
def get_palette_invoker(self):
return self._palette_invoker
def set_palette_invoker(self, palette_invoker):
self._palette_invoker.detach()
self._palette_invoker = palette_invoker
palette_invoker = GObject.Property(
type=object, setter=set_palette_invoker, getter=get_palette_invoker)
def do_draw(self, cr):
'''
Implementation method for drawing the button.
'''
if self.palette and self.palette.is_up():
allocation = self.get_allocation()
# draw a black background, has been done by the engine before
cr.set_source_rgb(0, 0, 0)
cr.rectangle(0, 0, allocation.width, allocation.height)
cr.paint()
Gtk.RadioToolButton.do_draw(self, cr)
if self.palette and self.palette.is_up():
invoker = self.palette.props.invoker
invoker.draw_rectangle(cr, self.palette)
return False
def get_hide_tooltip_on_click(self):
'''
Return True if the tooltip is hidden when a user
clicks on the button, otherwise return False.
'''
return self._hide_tooltip_on_click
def set_hide_tooltip_on_click(self, hide_tooltip_on_click):
'''
Set whether or not the tooltip is hidden when a user
clicks on the button.
Args:
hide_tooltip_on_click (bool): True if the tooltip is
hidden on click, and False otherwise.
'''
if self._hide_tooltip_on_click != hide_tooltip_on_click:
self._hide_tooltip_on_click = hide_tooltip_on_click
hide_tooltip_on_click = GObject.Property(
type=bool, default=True, getter=get_hide_tooltip_on_click,
setter=set_hide_tooltip_on_click)
def do_clicked(self):
'''
Implementation method for hiding the tooltip when
the button is clicked.
'''
if self._hide_tooltip_on_click and self.palette:
self.palette.popdown(True)
| lgpl-2.1 |
JCBarahona/edX | common/lib/xmodule/xmodule/tests/test_poll.py | 227 | 1133 | # -*- coding: utf-8 -*-
"""Test for Poll Xmodule functional logic."""
from xmodule.poll_module import PollDescriptor
from . import LogicTest
class PollModuleTest(LogicTest):
"""Logic tests for Poll Xmodule."""
descriptor_class = PollDescriptor
raw_field_data = {
'poll_answers': {'Yes': 1, 'Dont_know': 0, 'No': 0},
'voted': False,
'poll_answer': ''
}
def test_bad_ajax_request(self):
# Make sure that answer for incorrect request is error json.
response = self.ajax_request('bad_answer', {})
self.assertDictEqual(response, {'error': 'Unknown Command!'})
def test_good_ajax_request(self):
# Make sure that ajax request works correctly.
response = self.ajax_request('No', {})
poll_answers = response['poll_answers']
total = response['total']
callback = response['callback']
self.assertDictEqual(poll_answers, {'Yes': 1, 'Dont_know': 0, 'No': 1})
self.assertEqual(total, 2)
self.assertDictEqual(callback, {'objectName': 'Conditional'})
self.assertEqual(self.xmodule.poll_answer, 'No')
| agpl-3.0 |
hackthemarket/pystrat | sim.py | 1 | 10697 | # simple trading strategy simulator
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
from pandas.tools.plotting import scatter_matrix
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
import sys
import time
import logging as log
log.basicConfig(level=log.DEBUG)
import glob
import os.path
import pickle
import random
import pdb
pd.set_option('display.width',500)
# define constant friction function
DefaultBPS = 10
def FrictionInBps(U, cfg, kvargs):
""" default FrictionInBps function just returns default,
but the interface receives all strategy info after
strategy is run, so one can create more realistic
impact models """
return DefaultBPS
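# Hedged sketch of a richer friction model (not part of the original module;
# the 10-bps-per-1x-ADV slope is an assumption chosen only for illustration).
# It uses the ADV column that prep_univ below attaches to the universe.
def LinearImpactInBps(U, cfg, kvargs):
    """Per-row cost in bps: DefaultBPS floor plus size-dependent impact."""
    # fraction of average daily volume we are trading today
    pct_adv = (U.Trade_Qty.abs() / U.ADV.replace(0, np.nan)).fillna(0)
    return DefaultBPS + 10.0 * pct_adv  # a Series broadcasts fine in __sim
# usage sketch: cfg = DEF_SIM_CFG.copy(); cfg['FrictionInBps'] = LinearImpactInBps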
""" default simulator cfg dictionary.
default keys/values:
FrictionInBps - function that takes same args as strategy.
by default, returns DefaultBps.
InitBal - in $s
Reinvest - should we reinvest our winnings or constantly assume we have InitBal?
Verbose
"""
DEF_SIM_CFG= { 'FrictionInBps': FrictionInBps,
'Verbose' : True,
'InitBal' : 1e7,
'Reinvest' : True }
# columns in prepped univ
SIM_COLS = ["Sym","Product","Instrument",
"Multiplier","Expiry","Strike",
"Open","High","Low","Close","Volume"]
SIM_COLS_OUT = ["Prev_Weight", "Weight", "Prev_Qty", "Qty",
"Trade_Qty", "Trade_Fric", "PNL", "NET_PNL"]
SIM_COL_BALS =[ "NAV","Friction","PNL","NET_PNL", "Longs","Shorts",
"Long_Dlrs","Short_Dlrs","Num_Trades","Turnover","NET_Return"]
def squarem( df, sym='Sym', min_pct=.9 ) :
# squarem solves the common problem in which you have a large table of
# data grouped by symbols, some of which have missing data. You want to
# 'square' the data such that any symbol which is missing 'too much' data
# is expunged and the remaining data is filled appropriately, leaving you
# with a dataset which has the same # of observations for each symbol.
#
bysyms = df.groupby(sym).size()
idx = df.index.unique()
onumsyms = len(bysyms)
minlen = int(round(len(idx) * min_pct))  # honor the min_pct argument (was hard-coded .9)
keep = bysyms[bysyms > minlen]
u = df[ df[sym].isin(keep.index) ]
numsyms = len(keep)
log.info('Got rid of %d/%d symbols', (onumsyms - numsyms), onumsyms)
u.replace(0,np.nan,inplace=True)
u.replace([np.inf, -np.inf], np.nan,inplace=True)
u.sort_index(inplace=True)
uidx = u.index.unique()
# groupby and reindex magic
z = u.groupby(sym).apply(
lambda x: x.reindex(uidx).ffill()).reset_index(0,drop=True)
# badz = z[z.isnull().any(axis=1)]
# if len(badz.index) > 0 :
# badtimes = badz.index.unique().values
# z.drop( badtimes, inplace=True )
# for dt in badtimes:
# log.info('removed %s for NaNs',pd.to_datetime(str(dt)).strftime(
# '%Y-%m-%d'))
return z
def prep_univ( dateTime, symbol,
open, high, low, close, volume,
product, instrument='STK', multiplier=1.0,expiry=None,
strike=None,adv_days=20,sd_days=20, open2close_returns=True,
scaleAndCenter=False, **more_cols) :
# constructs universe appropriate for use with simulator; any additional columns
# passed-in via ellipsis will be added to table as named
#
U = pd.DataFrame({'Sym': symbol,
'Product' : product, 'Instrument':instrument,
'Multiplier': 1.0, 'Expiry': None, 'Strike':None,
'Open':open,'High':high, 'Low':low, 'Close':close,
'Volume':volume }, index=dateTime )
U = U[ SIM_COLS ]
if len(more_cols) > 0:
U = pd.concat( [U, pd.DataFrame(more_cols)], axis=1 )
U.reset_index( inplace=True)
U.sort_values(['Sym','Date'],inplace=True)
U.Date = pd.to_datetime(U.Date)
U.set_index('Date',inplace=True)
if scaleAndCenter :
log.debug('prep_univ: scaling & centering')
raw_scaled = U.groupby('Sym').transform(
lambda x : (x - x.mean())/x.std())
U = pd.concat([U.Sym, raw_scaled], axis=1)
# calculate adv, returns, fwd_returns & change in volume
U['ADV'] = U.groupby('Sym')['Volume'].apply(
pd.rolling_mean, adv_days, 1).shift()
U['DeltaV'] = U.groupby('Sym')['Volume'].transform(
lambda x : np.log(x / x.shift()) )
U['Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()) )
U['Fwd_Close'] = U.groupby('Sym')['Close'].shift(-1)
U['Fwd_Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()).shift(-1) ) # fwd.returns
U['SD'] = U.groupby('Sym')['Return'].apply(
pd.rolling_std, sd_days, 1).shift()
if open2close_returns:
U['Fwd_Open'] = U.groupby('Sym')['Open'].shift(-1)
U['Fwd_COReturn'] = np.divide(np.add( U.Fwd_Open, -U.Close ),U.Close)
U.ffill(inplace=True)
U.sort_index(inplace=True)
return U
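# Hedged usage sketch (assumes a hypothetical flat OHLCV frame 'px' with the
# named columns; not part of the original module):
#
#   U = prep_univ(px.Date, px.Sym, px.Open, px.High, px.Low, px.Close,
#                 px.Volume, product='US.EQ')
#   Z, B = sim(U)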
# simple, default strategy: equal weight universe on daily basis
def eq_wt( U, cfg, kvargs ) :
#pdb.set_trace()
U.Weight = 1/float(len(U.index))
return U
# given today's Universe U and Yesterday's Y, set U's
# Prev_Weight and Prev_Qty to Y's Weight & Qty
# TODO: clean-up
def _getprevs( U, Y ) :
# TODO: surely there's a cleaner way to do this...
wts = Y.reset_index()[['Sym','Weight']]
wts.columns = ['Sym','Prev_Weight']
pwts = U[['Sym']].merge( wts, on = 'Sym' )['Prev_Weight']
U.Prev_Weight=pwts.values
qts = Y.reset_index()[['Sym','Qty']]
qts.columns = ['Sym','Prev_Qty']
pqts = U[['Sym']].merge( qts, on = 'Sym' )['Prev_Qty']
U.Prev_Qty=pqts.values
# functor to run strategy each day and update tbls ...
# TODO: clean-up
def __sim ( U, FUN, cfg, B, kvargs) :
# run sim to set weights
U = FUN( U, cfg, kvargs)
# set prev values for weight & qty...
Y = kvargs.pop('_Y', None)
if Y is not None and not np.all(Y.index==U.index):
_getprevs(U,Y)
loop = 1 + int(kvargs.pop('_L'))
else:
loop = 0
kvargs['_L'] = loop
kvargs['_Y'] = U
bb = B.iloc[loop]
# fill-out trade details
NAV = bb.NAV
tospend = NAV/U.Weight
U.Qty = np.round((NAV*U.Weight) / (U.Multiplier*U.Close))
U.Trade_Qty = U.Qty - U.Prev_Qty
fbps = 1e-4 * cfg['FrictionInBps'](U,cfg,kvargs)
U.Trade_Fric = U.Trade_Qty * U.Close * U.Multiplier * fbps
U.PNL = (U.Fwd_Close - U.Close) * U.Qty * U.Multiplier
U.NET_PNL = U.PNL - U.Trade_Fric
# today's balances are based on yesterday's posns...
longs = U[U.Qty > 0]
shorts = U[U.Qty < 0]
trades = U[U.Trade_Qty != 0]
bb.Friction = U.Trade_Fric.sum()
bb.PNL = U.PNL.sum()
bb.NET_PNL = U.NET_PNL.sum()
bb.Longs = len(longs.index)
bb.Shorts = len(shorts.index)
bb.Long_Dlrs = (longs.Close * longs.Multiplier * longs.Qty).sum()
bb.Short_Dlrs = (shorts.Close * shorts.Multiplier * shorts.Qty).sum()
bb.Num_Trades = len(trades.index)
bb.Turnover = (trades.Close * trades.Multiplier
* trades.Trade_Qty.abs()).sum()/NAV
if loop > 0 :
yb = B.iloc[loop-1]
ynav = yb.NAV
tnav = ynav + yb.NET_PNL
bb.NAV = tnav
bb.NET_Return = (tnav-ynav)/ynav
B.iloc[loop] = bb
# pdb.set_trace()
return U
def sim( univ, sim_FUN=eq_wt, cfg=DEF_SIM_CFG.copy(), kvargs={} ) :
""" simulator: runs simulation and returns a table of activity and balances.
args:
univ - historical data that's been produced by prep_univ
sim_FUN - strategy function. by default, equal weights univ.
cfg - cfg info. by default
kvargs - strat-specific extra data in a dict
"""
#
t0 = time.time()
all_times = univ.index.unique().values
# prepare writable/output side of universe
W = pd.DataFrame( columns=SIM_COLS_OUT, index = univ.index).fillna(0.0)
U = pd.concat( [univ, W], axis=1 )
# create balances table: one per day
B = pd.DataFrame( columns = SIM_COL_BALS, index = all_times ).fillna(0.0)
B.NAV = cfg['InitBal']
# 'daily' loop
Z = U.groupby(U.index).apply( __sim, FUN=sim_FUN,
cfg=cfg, B=B, kvargs=kvargs )
log.info('ran over %d days and %d rows in %d secs', len(all_times),
len(U.index),time.time()-t0)
# summarize results a bit more...?
#ts=xts(B$Net.Return,order.by=B$DateTime)
# return universe and balances
#list(U=U,B=B, ts=ts)
return Z, B
def sharpe(Returns) :
return np.sqrt(252) * np.mean(Returns)/np.std(Returns)
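# Illustration (B is the balances table returned by sim above):
#
#   Z, B = sim(U)
#   print 'annualized sharpe: %.2f' % sharpe(B.NET_Return.dropna())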
def random_strat( U, cfg, kvargs ) :
# random portfolio strategy: picks 'num_names' randomly
nnames = kvargs.get('num_names',10)
names = random.sample(U.Sym, nnames )
U.Weight = np.where( U.Sym.isin( names ), 1/float(nnames), 0 )
return U
def best_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
best = U.sort_values('Return',ascending=False,
na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( best ), 1/float(nnames), 0 )
return U
def worst_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
worst = U.sort_values('Return',ascending=True,
na_position='last')['Sym'].head(nnames).values
U.Weight = np.where( U.Sym.isin( worst ), 1/float(nnames), 0 )
return U
def rtest(U,FUN=random_strat, runs=10):
# run given strat repeatedly, plotting NAVs and Returning them
# nb: this only makes sense if the strategy is random...
# run random_strat 'runs' times and plot NAVs
N = None
for i in range(runs) :
_,b = sim( U, sim_FUN=FUN )
n = pd.DataFrame(b.NAV)
N = n if N is None else pd.concat([N,n],axis=1)
N.plot(legend=False)
return N
def sim_test():
# dev driver
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
U = P[P.index >= '2005-01-01']
U.describe()
import sim
_,B = sim.sim(U)
#plot NAV
B.NAV.plot(title='Equal Weight Everyone')
return B
| gpl-3.0 |
tarzan0820/odoo | addons/account_bank_statement_extensions/report/__init__.py | 415 | 1128 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank_statement_balance_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
redhat-openstack/sahara | sahara/service/api.py | 4 | 7744 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from six.moves.urllib import parse as urlparse
from sahara import conductor as c
from sahara import context
from sahara.plugins import base as plugin_base
from sahara.plugins import provisioning
from sahara.service import quotas
from sahara.utils import general as g
from sahara.utils.notification import sender
from sahara.utils.openstack import nova
conductor = c.API
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
OPS = None
def setup_service_api(ops):
global OPS
OPS = ops
# Cluster ops
def get_clusters(**kwargs):
return conductor.cluster_get_all(context.ctx(), **kwargs)
def get_cluster(id, show_progress=False):
return conductor.cluster_get(context.ctx(), id, show_progress)
def scale_cluster(id, data):
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, id)
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
existing_node_groups = data.get('resize_node_groups', [])
additional_node_groups = data.get('add_node_groups', [])
# the next map is the main object we will work with
# to_be_enlarged : {node_group_id: desired_amount_of_instances}
to_be_enlarged = {}
for ng in existing_node_groups:
ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
to_be_enlarged.update({ng_id: ng['count']})
additional = construct_ngs_for_scaling(cluster, additional_node_groups)
cluster = conductor.cluster_get(ctx, cluster)
_add_ports_for_auto_sg(ctx, cluster, plugin)
try:
cluster = g.change_cluster_status(cluster, "Validating")
quotas.check_scaling(cluster, to_be_enlarged, additional)
plugin.validate_scaling(cluster, to_be_enlarged, additional)
except Exception as e:
with excutils.save_and_reraise_exception():
g.clean_cluster_from_empty_ng(cluster)
g.change_cluster_status(cluster, "Active", six.text_type(e))
# If we are here validation is successful.
# So let's update to_be_enlarged map:
to_be_enlarged.update(additional)
for node_group in cluster.node_groups:
if node_group.id not in to_be_enlarged:
to_be_enlarged[node_group.id] = node_group.count
OPS.provision_scaled_cluster(id, to_be_enlarged)
return cluster
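# Illustration (assumed request body): for data == {'resize_node_groups':
# [{'name': 'workers', 'count': 5}]} the map handed to the provisioning ops is
# {<id of the 'workers' node group>: 5}, plus one {new_ng_id: count} entry per
# group in 'add_node_groups' via construct_ngs_for_scaling, plus the unchanged
# counts of every other node group.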
def create_cluster(values):
ctx = context.ctx()
cluster = conductor.cluster_create(ctx, values)
sender.notify(ctx, cluster.id, cluster.name, "New",
"create")
plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
_add_ports_for_auto_sg(ctx, cluster, plugin)
# validating cluster
try:
cluster = g.change_cluster_status(cluster, "Validating")
quotas.check_cluster(cluster)
plugin.validate(cluster)
except Exception as e:
with excutils.save_and_reraise_exception():
g.change_cluster_status(cluster, "Error",
six.text_type(e))
OPS.provision_cluster(cluster.id)
return cluster
def _add_ports_for_auto_sg(ctx, cluster, plugin):
for ng in cluster.node_groups:
if ng.auto_security_group:
ports = {'open_ports': plugin.get_open_ports(ng)}
conductor.node_group_update(ctx, ng, ports)
def terminate_cluster(id):
cluster = g.change_cluster_status(id, "Deleting")
OPS.terminate_cluster(id)
sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
"delete")
# ClusterTemplate ops
def get_cluster_templates(**kwargs):
return conductor.cluster_template_get_all(context.ctx(), **kwargs)
def get_cluster_template(id):
return conductor.cluster_template_get(context.ctx(), id)
def create_cluster_template(values):
return conductor.cluster_template_create(context.ctx(), values)
def terminate_cluster_template(id):
return conductor.cluster_template_destroy(context.ctx(), id)
def update_cluster_template(id, values):
return conductor.cluster_template_update(context.ctx(), id, values)
# NodeGroupTemplate ops
def get_node_group_templates(**kwargs):
return conductor.node_group_template_get_all(context.ctx(), **kwargs)
def get_node_group_template(id):
return conductor.node_group_template_get(context.ctx(), id)
def create_node_group_template(values):
return conductor.node_group_template_create(context.ctx(), values)
def terminate_node_group_template(id):
return conductor.node_group_template_destroy(context.ctx(), id)
def update_node_group_template(id, values):
return conductor.node_group_template_update(context.ctx(), id, values)
# Plugins ops
def get_plugins():
return plugin_base.PLUGINS.get_plugins(
base=provisioning.ProvisioningPluginBase)
def get_plugin(plugin_name, version=None):
plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
if plugin:
res = plugin.as_resource()
if version:
if version in plugin.get_versions():
configs = plugin.get_configs(version)
res._info['configs'] = [c.dict for c in configs]
processes = plugin.get_node_processes(version)
res._info['node_processes'] = processes
required_image_tags = plugin.get_required_image_tags(version)
res._info['required_image_tags'] = required_image_tags
else:
return None
return res
def convert_to_cluster_template(plugin_name, version, template_name,
config_file):
plugin = plugin_base.PLUGINS.get_plugin(plugin_name)
return plugin.convert(config_file, plugin_name, version,
urlparse.unquote(template_name),
conductor.cluster_template_create)
def construct_ngs_for_scaling(cluster, additional_node_groups):
ctx = context.ctx()
additional = {}
for ng in additional_node_groups:
count = ng['count']
ng['count'] = 0
ng_id = conductor.node_group_add(ctx, cluster, ng)
additional.update({ng_id: count})
return additional
# Image Registry
def get_images(name, tags):
return nova.client().images.list_registered(name, tags)
def get_image(**kwargs):
if len(kwargs) == 1 and 'id' in kwargs:
return nova.client().images.get(kwargs['id'])
else:
return nova.client().images.find(**kwargs)
def get_registered_image(id):
return nova.client().images.get_registered_image(id)
def register_image(image_id, username, description=None):
client = nova.client()
client.images.set_description(image_id, username, description)
return client.images.get(image_id)
def unregister_image(image_id):
client = nova.client()
client.images.unset_description(image_id)
return client.images.get(image_id)
def add_image_tags(image_id, tags):
client = nova.client()
client.images.tag(image_id, tags)
return client.images.get(image_id)
def remove_image_tags(image_id, tags):
client = nova.client()
client.images.untag(image_id, tags)
return client.images.get(image_id)
| apache-2.0 |
codecollision/DropboxToFlickr | django/contrib/gis/geos/libgeos.py | 154 | 5541 | """
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (get called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr(), and GEOM_PTR.
"""
import os, re, sys
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError, ImportError):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if not lib_path is None: break
# No GEOS library could be found.
if lib_path is None:
raise ImportError('Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names))
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst, output_h=sys.stdout):
try:
warn_msg = fmt % lst
except:
warn_msg = fmt
output_h.write('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst, output_h=sys.stderr):
try:
err_msg = fmt % lst
except:
err_msg = fmt
output_h.write('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure): pass
class GEOSPrepGeom_t(Structure): pass
class GEOSCoordSeq_t(Structure): pass
class GEOSContextHandle_t(Structure): pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', or '3.0.0-CAPI-1.4.1'
version_regex = re.compile(r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))(rc(?P<release_candidate>\d+))?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)$')
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version()
m = version_regex.match(ver)
if not m: raise GEOSException('Could not parse version info string "%s"' % ver)
return dict((key, m.group(key)) for key in ('version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor'))
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
GEOS_PREPARE = GEOS_VERSION >= (3, 1, 0)
if GEOS_PREPARE:
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
else:
# When thread-safety isn't available, the initGEOS routine must be called
# first. This function takes the notice and error functions, defined
# as Python callbacks above, as parameters. Here is the C code that is
# wrapped:
# extern void GEOS_DLL initGEOS(GEOSMessageHandler notice_function, GEOSMessageHandler error_function);
lgeos.initGEOS(notice_h, error_h)
# Calling finishGEOS() upon exit of the interpreter.
import atexit
atexit.register(lgeos.finishGEOS)
| bsd-3-clause |
OpusVL/odoo | addons/website_customer/__openerp__.py | 313 | 1571 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Customer References',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Publish Your Customer References',
'version': '1.0',
'description': """
OpenERP Customer References
===========================
""",
'author': 'OpenERP SA',
'depends': [
'crm_partner_assign',
'website_partner',
'website_google_map',
],
'demo': [
'website_customer_demo.xml',
],
'data': [
'views/website_customer.xml',
],
'qweb': [],
'installable': True,
}
| agpl-3.0 |
mvanotti/mig | tools/delete_unused_queues.py | 18 | 1797 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Contributor: Julien Vehent jvehent@mozilla.com [:ulfr]
# This script connects to the management api of rabbitmq to list scheduler
# queues that have no consumer, indicating that the agent has timed out and
# the scheduler stopped consuming that queue. The script then finds a corresponding
# agent queue and, if it has no consumer either, deletes both queues.
#
# rabbitmq admin credentials can be passed via environment variables:
# $ export RABBITMQ_ADMINUSER="admin"
# $ export RABBITMQ_ADMINPASSWD="password12345"
# $ python delete_unused_queues.py
import requests
import json
import sys
import re
import os
def main():
mqadmin = os.environ['RABBITMQ_ADMINUSER']
mqpasswd = os.environ['RABBITMQ_ADMINPASSWD']
r = requests.get('http://localhost:15672/api/queues/mig', auth=(mqadmin, mqpasswd))
if r.status_code != 200:
print('request failed')
sys.exit(1)
for queue in r.json():
if queue['consumers'] == 0:
try:
agtq = re.search('mig.sched.(.+?)$', queue['name']).group(1)
except AttributeError:
continue
r2 = requests.get('http://localhost:15672/api/queues/mig/mig.agt.'+agtq,
auth=(mqadmin, mqpasswd))
if r2.status_code != 200:
continue
if r2.json()['consumers'] == 0:
print('%s has no consumer' % (agtq))
for loc in ['agt', 'sched']:
r3 = requests.delete('http://localhost:15672/api/queues/mig/mig.'+loc+'.'+agtq,
auth=(mqadmin, mqpasswd))
if r3.status_code == 204:
print('deleted mig.'+loc+'.'+agtq)
else:
print('failed to delete mig.'+loc+'.'+agtq)
if __name__ == "__main__":
main()
| mpl-2.0 |
douglaskastle/bootswatch | convert_bootswatch_mutara.py | 1 | 7972 | import re
import os
values = {
# 'uc': 'Grissom',
'lc': 'mutara',
'header': 'Michroma',
'body': 'Play',
'website': 'mavek_org',
# 'cl': '#116BB7',
}
def main():
src = 'cyborg'
cmd = 'cp -r {0}/* {1}'.format(src, values['lc'])
os.system(cmd)
infile = "{0}/bootswatch.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/bootswatch.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
if re.search("Roboto", line):
continue
if re.search("web-font-path", line):
line = '@web-font-path2: "https://fonts.googleapis.com/css?family={0}:400,700,400italic";\n'.format(values['body']) + line
line = '@web-font-path: "https://fonts.googleapis.com/css?family={0}:300italic,400italic,700italic,400,300,700";\n'.format(values['header']) + line
line = line + '.web-font(@web-font-path2);\n'
f.write(line)
f.close()
infile = "{0}/variables.less".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
swap_list = {
'@brand-primary:': '@brand-primary: #00ff00',
'@brand-success:': '@brand-success: #0000ff',
'@text-color:': '@text-color: #ffffff',
'@headings-color:': '@headings-color: #00ff00',
'@border-radius-base:': '@border-radius-base: 20px',
'@border-radius-large:': '@border-radius-large: 22px',
'@border-radius-small:': '@border-radius-small: 19px',
'@component-active-color:': '@component-active-color: #00ff00',
'@btn-default-color:': '@btn-default-color: #000',
'@btn-default-bg:': '@btn-default-bg: lighten(@gray-dark, 50%)',
'@input-bg:': '@input-bg: @gray-dark',
'@input-group-addon-bg:': '@input-group-addon-bg: @gray-lighter',
'@dropdown-border:': '@dropdown-border: rgba(0,255,0,0.1)',
'@dropdown-divider-bg:': '@dropdown-divider-bg: rgba(0,255,0,0.1)',
'@dropdown-link-color:': '@dropdown-link-color: #00ff00',
'@dropdown-link-hover-color:': '@dropdown-link-hover-color: #00ff00',
'@dropdown-link-active-color:': '@dropdown-link-active-color: #00ff00',
'@navbar-default-link-hover-color:': '@navbar-default-link-hover-color: #00ff00',
'@navbar-default-link-active-color:': '@navbar-default-link-active-color: #00ff00',
'@navbar-default-brand-color:': '@navbar-default-brand-color: #00ff00',
'@navbar-default-brand-hover-color:': '@navbar-default-brand-hover-color: #00ff00',
'@navbar-inverse-link-hover-color:': '@navbar-inverse-link-hover-color: #0000ff',
'@navbar-inverse-brand-color:': '@navbar-inverse-brand-color: #0000ff',
'@navbar-inverse-brand-hover-color:': '@navbar-inverse-brand-hover-color: #0000ff',
'@navbar-inverse-toggle-hover-bg:': '@navbar-inverse-toggle-hover-bg: #8080ff',
'@navbar-inverse-toggle-icon-bar-bg:': '@navbar-inverse-toggle-icon-bar-bg: #0000ff',
'@navbar-inverse-toggle-border-color:': '@navbar-inverse-toggle-border-color: #8080ff',
'@nav-tabs-active-link-hover-color:': '@nav-tabs-active-link-hover-color: #000',
'@pagination-color:': '@pagination-color: #000',
'@pagination-bg:': '@pagination-bg: @gray',
'@pagination-hover-color:': '@pagination-hover-color: #000',
'@pagination-active-color:': '@pagination-active-color: #000',
'@pagination-disabled-bg:': '@pagination-disabled-bg: @gray',
'@state-success-text:': '@state-success-text: #000',
'@state-info-text:': '@state-info-text: #000',
'@state-warning-text:': '@state-warning-text: #000',
'@state-danger-text:': '@state-danger-text: #000',
'@tooltip-bg:': '@tooltip-bg: #000',
'@popover-bg:': '@popover-bg: lighten(@body-bg, 10%)',
'@popover-fallback-border-color:': '@popover-fallback-border-color: #999',
'@popover-arrow-outer-color:': '@popover-arrow-outer-color: fadein(@popover-border-color, 5%)',
'@popover-arrow-outer-fallback-color:': '@popover-arrow-outer-fallback-color: darken(@popover-fallback-border-color, 20%)',
'@label-color:': '@label-color: #000',
'@label-link-hover-color:': '@label-link-hover-color: #000',
'@list-group-link-heading-color:': '@list-group-link-heading-color: #000',
'@panel-primary-text:': '@panel-primary-text: #000',
'@badge-color:': '@badge-color: #000',
'@badge-link-hover-color:': '@badge-link-hover-color: #000',
'@badge-active-bg:': '@badge-active-bg: #000',
'@breadcrumb-color:': '@breadcrumb-color: #00ff00',
'@carousel-control-color:': '@carousel-control-color: #000',
# '': '',
}
outfile = values['lc'] + "/variables.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
#line = re.sub('Roboto', 'Michroma', line)
for s in swap_list.keys():
if re.search(s, line):
line = swap_list[s] + ";\n"
line = re.sub('headings-font-family: @font-family-base', 'headings-font-family: @font-family-header-sans-serif', line)
if re.search("Roboto", line):
line = re.sub('Roboto', '{0}'.format(values['body']), line)
line = '@font-family-header-sans-serif: "{0}", "Helvetica Neue", Helvetica, Arial, sans-serif;\n'.format(values['header']) + line
f.write(line)
f.close()
infile = "{0}/index.html".format(src)
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/index.html"
f = open(outfile, 'w')
for line in lines:
line = re.sub(src.title(), values['lc'].title(), line)
line = re.sub(src, values['lc'], line)
line = re.sub('UA-[0-9\-]+', '', line)
if re.search('bootstrap.css" media="screen"', line):
line = line + ' <link rel="stylesheet" href="./bootstrap_fixes.css" media="screen">\n'
f.write(line)
f.close()
grunt = "/cygdrive/c/Users/keeshand/AppData/Roaming/npm/grunt"
cmd = "{0} swatch:{1}".format(grunt, values['lc'])
os.system(cmd)
cmd = "cp {0}/bootstrap.min.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap.{0}.min.css".format(values['lc'], values['website'])
os.system(cmd)
cmd = "cp {0}/bootstrap_fixes.css ../{1}/pelican-themes/bootstrap3/static/css/bootstrap_fixes.{0}.css".format(values['lc'], values['website'])
os.system(cmd)
if __name__ == '__main__':
main()
| mit |
resmo/ansible | lib/ansible/modules/network/meraki/meraki_admin.py | 19 | 17464 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Kevin Breit (@kbreit) <kevin.breit@kevinbreit.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: meraki_admin
short_description: Manage administrators in the Meraki cloud
version_added: '2.6'
description:
- Allows for creation, management, and visibility into administrators within Meraki.
options:
name:
description:
- Name of the dashboard administrator.
- Required when creating a new administrator.
type: str
email:
description:
- Email address for the dashboard administrator.
- Email cannot be updated.
- Required when creating or editing an administrator.
type: str
org_access:
description:
- Privileges assigned to the administrator in the organization.
aliases: [ orgAccess ]
choices: [ full, none, read-only ]
type: str
tags:
description:
- Tags the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
- If C(none) is specified, C(network) or C(tags) must be specified.
suboptions:
tag:
description:
- Object tag which privileges should be assigned.
type: str
access:
description:
- The privilege of the dashboard administrator for the tag.
type: str
networks:
description:
- List of networks the administrator has privileges on.
- When creating a new administrator, C(org_name), C(network), or C(tags) must be specified.
suboptions:
id:
description:
- Network ID for which administrator should have privileges assigned.
type: str
access:
description:
- The privilege of the dashboard administrator on the network.
- Valid options are C(full), C(read-only), or C(none).
type: str
state:
description:
- Create, modify, or delete an administrator.
- If C(state) is C(absent), name takes priority over email if both are specified.
choices: [ absent, present, query ]
required: true
type: str
org_name:
description:
- Name of organization.
- Used when C(name) should refer to another object.
- When creating a new administrator, C(org_name), C(networks), or C(tags) must be specified.
aliases: ['organization']
type: str
author:
- Kevin Breit (@kbreit)
extends_documentation_fragment: meraki
'''
EXAMPLES = r'''
- name: Query information about all administrators associated to the organization
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
delegate_to: localhost
- name: Query information about a single administrator by name
meraki_admin:
auth_key: abc12345
org_id: 12345
state: query
name: Jane Doe
- name: Query information about a single administrator by email
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: query
email: jane@doe.com
- name: Create new administrator with organization access
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: jane@doe.com
- name: Revoke access to an organization for an administrator
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: absent
email: jane@doe.com
- name: Create a new administrator with full access to two tags
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: jane@doe.com
tags:
- tag: tenant
access: full
- tag: corporate
access: read-only
- name: Create a new administrator with full access to a network
meraki_admin:
auth_key: abc12345
org_name: YourOrg
state: present
name: Jane Doe
org_access: read-only
email: jane@doe.com
networks:
- id: N_12345
access: full
'''
RETURN = r'''
data:
description: List of administrators.
returned: success
type: complex
contains:
email:
description: Email address of administrator.
returned: success
type: str
sample: your@email.com
id:
description: Unique identification number of administrator.
returned: success
type: str
sample: 1234567890
name:
description: Given name of administrator.
returned: success
type: str
sample: John Doe
account_status:
description: Status of account.
returned: success
type: str
sample: ok
two_factor_auth_enabled:
description: Enabled state of two-factor authentication for administrator.
returned: success
type: bool
sample: false
has_api_key:
description: Defines whether the administrator has an API key assigned to their account.
returned: success
type: bool
sample: false
last_active:
description: Date and time the administrator was last active within Dashboard.
returned: success
type: str
sample: 2019-01-28 14:58:56 -0800
networks:
description: List of networks administrator has access on.
returned: success
type: complex
contains:
id:
description: The network ID.
returned: when network permissions are set
type: str
sample: N_0123456789
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when network permissions are set
type: str
sample: read-only
tags:
description: Tags the administrator has access on.
returned: success
type: complex
contains:
tag:
description: Tag name.
returned: when tag permissions are set
type: str
sample: production
access:
description: Access level of administrator. Options are 'full', 'read-only', or 'none'.
returned: when tag permissions are set
type: str
sample: full
org_access:
description: The privilege of the dashboard administrator on the organization. Options are 'full', 'read-only', or 'none'.
returned: success
type: str
sample: full
'''
import os
from ansible.module_utils.basic import AnsibleModule, json, env_fallback
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_native
from ansible.module_utils.common.dict_transformations import recursive_diff
from ansible.module_utils.network.meraki.meraki import MerakiModule, meraki_argument_spec
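# Fetch all administrators in an organization; returns None unless the API
# responds with HTTP 200.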
def get_admins(meraki, org_id):
admins = meraki.request(
meraki.construct_path(
'query',
function='admin',
org_id=org_id
),
method='GET'
)
if meraki.status == 200:
return admins
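# Resolve an administrator's ID, matching by name first (failing on duplicate
# names) and falling back to email; calls fail_json() if nothing matches.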
def get_admin_id(meraki, data, name=None, email=None):
admin_id = None
for a in data:
if meraki.params['name'] is not None:
if meraki.params['name'] == a['name']:
if admin_id is not None:
meraki.fail_json(msg='There are multiple administrators with the same name')
else:
admin_id = a['id']
elif meraki.params['email']:
if meraki.params['email'] == a['email']:
return a['id']
if admin_id is None:
meraki.fail_json(msg='No admin_id found')
return admin_id
def get_admin(meraki, data, id):
for a in data:
if a['id'] == id:
return a
meraki.fail_json(msg='No admin found by specified name or email')
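# Return the full admin record matching an email address, or None.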
def find_admin(meraki, data, email):
for a in data:
if a['email'] == email:
return a
return None
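# Revoke an administrator's access to the organization; the Dashboard API
# returns HTTP 204 with an empty body on success.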
def delete_admin(meraki, org_id, admin_id):
path = meraki.construct_path('revoke', 'admin', org_id=org_id) + admin_id
r = meraki.request(path,
method='DELETE'
)
if meraki.status == 204:
return r
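# Translate the user-supplied JSON list of network name/access pairs into the
# {'id': ..., 'access': ...} structure the Dashboard API expects.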
def network_factory(meraki, networks, nets):
networks = json.loads(networks)
networks_new = []
for n in networks:
networks_new.append({'id': meraki.get_net_id(org_name=meraki.params['org_name'],
net_name=n['network'],
data=nets),
'access': n['access']
})
return networks_new
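# Create a new administrator, or update the existing one when the email is
# already registered in the organization; honours check mode and reports a
# diff on update. Returns -1 when no change is required.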
def create_admin(meraki, org_id, name, email):
payload = dict()
payload['name'] = name
payload['email'] = email
is_admin_existing = find_admin(meraki, get_admins(meraki, org_id), email)
if meraki.params['org_access'] is not None:
payload['orgAccess'] = meraki.params['org_access']
if meraki.params['tags'] is not None:
payload['tags'] = json.loads(meraki.params['tags'])
if meraki.params['networks'] is not None:
nets = meraki.get_nets(org_id=org_id)
networks = network_factory(meraki, meraki.params['networks'], nets)
payload['networks'] = networks
if is_admin_existing is None: # Create new admin
if meraki.module.check_mode is True:
meraki.result['data'] = payload
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
path = meraki.construct_path('create', function='admin', org_id=org_id)
r = meraki.request(path,
method='POST',
payload=json.dumps(payload)
)
if meraki.status == 201:
meraki.result['changed'] = True
return r
elif is_admin_existing is not None: # Update existing admin
if not meraki.params['tags']:
payload['tags'] = []
if not meraki.params['networks']:
payload['networks'] = []
if meraki.is_update_required(is_admin_existing, payload) is True:
if meraki.module.check_mode is True:
diff = recursive_diff(is_admin_existing, payload)
is_admin_existing.update(payload)
meraki.result['diff'] = {'before': diff[0],
'after': diff[1],
}
meraki.result['changed'] = True
meraki.result['data'] = payload
meraki.exit_json(**meraki.result)
path = meraki.construct_path('update', function='admin', org_id=org_id) + is_admin_existing['id']
r = meraki.request(path,
method='PUT',
payload=json.dumps(payload)
)
if meraki.status == 200:
meraki.result['changed'] = True
return r
else:
meraki.result['data'] = is_admin_existing
if meraki.module.check_mode is True:
meraki.result['data'] = payload
meraki.exit_json(**meraki.result)
return -1
def main():
# define the available arguments/parameters that a user can pass to
# the module
argument_spec = meraki_argument_spec()
argument_spec.update(state=dict(type='str', choices=['present', 'query', 'absent'], required=True),
name=dict(type='str'),
email=dict(type='str'),
org_access=dict(type='str', aliases=['orgAccess'], choices=['full', 'read-only', 'none']),
tags=dict(type='json'),
networks=dict(type='json'),
org_name=dict(type='str', aliases=['organization']),
org_id=dict(type='str'),
)
# seed the result dict in the object
# we primarily care about changed and state
# change is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
)
meraki = MerakiModule(module, function='admin')
meraki.function = 'admin'
meraki.params['follow_redirects'] = 'all'
query_urls = {'admin': '/organizations/{org_id}/admins',
}
create_urls = {'admin': '/organizations/{org_id}/admins',
}
update_urls = {'admin': '/organizations/{org_id}/admins/',
}
revoke_urls = {'admin': '/organizations/{org_id}/admins/',
}
meraki.url_catalog['query'] = query_urls
meraki.url_catalog['create'] = create_urls
meraki.url_catalog['update'] = update_urls
meraki.url_catalog['revoke'] = revoke_urls
try:
meraki.params['auth_key'] = os.environ['MERAKI_KEY']
except KeyError:
pass
payload = None
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
# execute checks for argument completeness
if meraki.params['state'] == 'query':
meraki.mutually_exclusive = ['name', 'email']
if not meraki.params['org_name'] and not meraki.params['org_id']:
meraki.fail_json(msg='org_name or org_id required')
meraki.required_if = [(['state'], ['absent'], ['email']),
]
# manipulate or modify the state as needed (this is going to be the
# part where your module will do what it needs to do)
org_id = meraki.params['org_id']
if not meraki.params['org_id']:
org_id = meraki.get_org_id(meraki.params['org_name'])
if meraki.params['state'] == 'query':
admins = get_admins(meraki, org_id)
if not meraki.params['name'] and not meraki.params['email']: # Return all admins for org
meraki.result['data'] = admins
if meraki.params['name'] is not None: # Return a single admin for org
admin_id = get_admin_id(meraki, admins, name=meraki.params['name'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['email'] is not None:
admin_id = get_admin_id(meraki, admins, email=meraki.params['email'])
meraki.result['data'] = admin_id
admin = get_admin(meraki, admins, admin_id)
meraki.result['data'] = admin
elif meraki.params['state'] == 'present':
r = create_admin(meraki,
org_id,
meraki.params['name'],
meraki.params['email'],
)
if r != -1:
meraki.result['data'] = r
elif meraki.params['state'] == 'absent':
if meraki.module.check_mode is True:
meraki.result['data'] = {}
meraki.result['changed'] = True
meraki.exit_json(**meraki.result)
admin_id = get_admin_id(meraki,
get_admins(meraki, org_id),
email=meraki.params['email']
)
r = delete_admin(meraki, org_id, admin_id)
if r != -1:
meraki.result['data'] = r
meraki.result['changed'] = True
# in the event of a successful module execution, simply call
# AnsibleModule.exit_json(), passing the key/value results
meraki.exit_json(**meraki.result)
if __name__ == '__main__':
main()
| gpl-3.0 |
francbartoli/geonode | geonode/apps.py | 10 | 1313 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.apps import AppConfig as BaseAppConfig
def run_setup_hooks(*args, **kwargs):
from django.conf import settings
from .celery_app import app as celery_app
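# NOTE: INSTALLED_APPS normally holds dotted-path strings, while celery_app
# is the Celery application object, so this membership test never matches
# and the app object is appended on every call.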
if celery_app not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS += (celery_app, )
class AppConfig(BaseAppConfig):
name = "geonode"
label = "geonode"
def ready(self):
super(AppConfig, self).ready()
run_setup_hooks()
| gpl-3.0 |
TRUFIL/erpnext | erpnext/templates/pages/projects.py | 39 | 3258 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
def get_context(context):
project_user = frappe.db.get_value("Project User", {"parent": frappe.form_dict.project, "user": frappe.session.user}, "user")
if not project_user or frappe.session.user == 'Guest':
raise frappe.PermissionError
context.no_cache = 1
context.show_sidebar = True
project = frappe.get_doc('Project', frappe.form_dict.project)
project.has_permission('read')
project.tasks = get_tasks(project.name, start=0, item_status='open',
search=frappe.form_dict.get("search"))
project.timesheets = get_timesheets(project.name, start=0,
search=frappe.form_dict.get("search"))
context.doc = project
def get_tasks(project, start=0, search=None, item_status=None):
filters = {"project": project}
if search:
filters["subject"] = ("like", "%{0}%".format(search))
# if item_status:
# filters["status"] = item_status
tasks = frappe.get_all("Task", filters=filters,
fields=["name", "subject", "status", "_seen", "_comments", "modified", "description"],
limit_start=start, limit_page_length=10)
for task in tasks:
task.todo = frappe.get_all('ToDo',filters={'reference_name':task.name, 'reference_type':'Task'},
fields=["assigned_by", "owner", "modified", "modified_by"])
if task.todo:
task.todo=task.todo[0]
task.todo.user_image = frappe.db.get_value('User', task.todo.owner, 'user_image')
task.comment_count = len(json.loads(task._comments or "[]"))
task.css_seen = ''
if task._seen:
if frappe.session.user in json.loads(task._seen):
task.css_seen = 'seen'
return tasks
@frappe.whitelist()
def get_task_html(project, start=0, item_status=None):
return frappe.render_template("erpnext/templates/includes/projects/project_tasks.html",
{"doc": {
"name": project,
"project_name": project,
"tasks": get_tasks(project, start, item_status=item_status)}
}, is_path=True)
def get_timesheets(project, start=0, search=None):
filters = {"project": project}
if search:
filters["activity_type"] = ("like", "%{0}%".format(search))
timesheets = frappe.get_all('Timesheet Detail', filters=filters,
fields=['project','activity_type','from_time','to_time','parent'],
limit_start=start, limit_page_length=10)
for timesheet in timesheets:
timesheet.infos = frappe.get_all('Timesheet', filters={"name": timesheet.parent},
fields=['name','_comments','_seen','status','modified','modified_by'],
limit_start=start, limit_page_length=10)
for info in timesheet.infos:
info.user_image = frappe.db.get_value('User', info.modified_by, 'user_image')
info.comment_count = len(json.loads(info._comments or "[]"))
info.css_seen = ''
if info._seen:
if frappe.session.user in json.loads(info._seen):
info.css_seen = 'seen'
return timesheets
@frappe.whitelist()
def get_timesheet_html(project, start=0):
return frappe.render_template("erpnext/templates/includes/projects/project_timesheets.html",
{"doc": {"timesheets": get_timesheets(project, start)}}, is_path=True)
| gpl-3.0 |
sony/nnabla | python/src/nnabla/models/imagenet/densenet.py | 1 | 2722 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from nnabla.utils.nnp_graph import NnpNetworkPass
from .base import ImageNetBase
class DenseNet(ImageNetBase):
"""
The following is a list of strings that can be specified to the ``use_up_to`` option in the ``__call__`` method:
* ``'classifier'`` (default): The output of the final affine layer for classification.
* ``'pool'``: The output of the final global average pooling.
* ``'lastconv'``: The output from last denseblock.
* ``'lastconv+relu'``: Network up to ``'lastconv'`` followed by ReLU activation.
References:
* `Huang et al., Densely Connected Convolutional Networks.
<https://arxiv.org/abs/1608.06993>`_
"""
_KEY_VARIABLE = {
'classifier': 'DenseNet/Affine',
'pool': 'DenseNet/AveragePooling_4',
'lastconv': 'DenseNet/BatchNormalization_5',
'lastconv+relu': 'DenseNet/ReLU_5',
}
def __init__(self):
# Load nnp
self._load_nnp('DenseNet-161.nnp', 'DenseNet-161/DenseNet-161.nnp')
def _input_shape(self):
return (3, 224, 224)
def __call__(self, input_var=None, use_from=None, use_up_to='classifier', training=False, force_global_pooling=False, check_global_pooling=True, returns_net=False, verbose=0):
assert use_from is None, 'This should not be set because it is for forward compatibility.'
input_var = self.get_input_var(input_var)
callback = NnpNetworkPass(verbose)
callback.remove_and_rewire('ImageAugmentationX')
callback.set_variable('InputX', input_var)
self.configure_global_average_pooling(
callback, force_global_pooling, check_global_pooling, 'DenseNet/AveragePooling_4')
callback.set_batch_normalization_batch_stat_all(training)
self.use_up_to(use_up_to, callback)
if not training:
callback.fix_parameters()
batch_size = input_var.shape[0]
net = self.nnp.get_network(
'Train', batch_size=batch_size, callback=callback)
if returns_net:
return net
return list(net.outputs.values())[0]
| apache-2.0 |
facebookexperimental/eden | eden/scm/tests/test-convert-authormap-t.py | 2 | 1653 | # Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Mercurial Contributors.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
from testutil.dott import feature, sh, testtmp # noqa: F401
(
sh % "cat"
<< r"""
[extensions]
convert=
"""
>> "$HGRCPATH"
)
# Prepare orig repo
sh % "hg init orig"
sh % "cd orig"
sh % "echo foo" > "foo"
sh % "'HGUSER=user name' hg ci -qAm foo"
sh % "cd .."
# Explicit --authors
sh % "cat" << r"""
user name = Long User Name
# comment
this line is ignored
""" > "authormap.txt"
sh % "hg convert --authors authormap.txt orig new" == r"""
initializing destination new repository
ignoring bad line in author map file authormap.txt: this line is ignored
scanning source...
sorting...
converting...
0 foo
writing author map file $TESTTMP/new/.hg/authormap"""
sh % "cat new/.hg/authormap" == "user name=Long User Name"
sh % "hg -Rnew log" == r"""
commit: d89716e88087
user: Long User Name
date: Thu Jan 01 00:00:00 1970 +0000
summary: foo"""
sh % "rm -rf new"
# Implicit .hg/authormap
sh % "hg init new"
sh % "mv authormap.txt new/.hg/authormap"
sh % "hg convert orig new" == r"""
ignoring bad line in author map file $TESTTMP/new/.hg/authormap: this line is ignored
scanning source...
sorting...
converting...
0 foo"""
sh % "hg -Rnew log" == r"""
commit: d89716e88087
user: Long User Name
date: Thu Jan 01 00:00:00 1970 +0000
summary: foo"""
| gpl-2.0 |
SheffieldML/GPy | GPy/kern/src/todo/spline.py | 19 | 1797 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
from ...core.parameterization import Param
def theta(x):
"""Heaviside step function"""
return np.where(x>=0.,1.,0.)
class Spline(Kernpart):
"""
Spline kernel
:param input_dim: the number of input dimensions (fixed to 1 right now TODO)
:type input_dim: int
:param variance: the variance of the kernel
:type variance: float
"""
def __init__(self,input_dim,variance=1.,lengthscale=1.):
self.input_dim = input_dim
assert self.input_dim==1
self.num_params = 1
self.name = 'spline'
self.variance = Param('variance', variance)
self.lengthscale = Param('lengthscale', lengthscale)
self.add_parameters(self.variance, self.lengthscale)
# def _get_params(self):
# return self.variance
#
# def _set_params(self,x):
# self.variance = x
#
# def _get_param_names(self):
# return ['variance']
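# Covariance of an integrated Wiener process (cubic-spline kernel) on the
# positive half-line: k(t, s) = variance * (t*s**2/2 - s**3/6) for s <= t,
# symmetrised for s > t by the Heaviside term below.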
def K(self,X,X2,target):
assert np.all(X>0), "Spline covariance is for +ve domain only. TODO: symmetrise"
assert np.all(X2>0), "Spline covariance is for +ve domain only. TODO: symmetrise"
t = X
s = X2.T
s_t = s-t # broadcasted subtraction
target += self.variance*(0.5*(t*s**2) - s**3/6. + (s_t)**3*theta(s_t)/6.)
def Kdiag(self,X,target):
target += self.variance*X.flatten()**3/3.
def _param_grad_helper(self,X,X2,target):
t, s = X, X2.T # t, s and s_t were undefined in the original; mirror K()
s_t = s - t
target += 0.5*(t*s**2) - s**3/6. + (s_t)**3*theta(s_t)/6.
def dKdiag_dtheta(self,X,target):
target += X.flatten()**3/3.
def dKdiag_dX(self,X,target):
target += self.variance*X**2
| bsd-3-clause |
KokareIITP/django | django/core/management/commands/startapp.py | 513 | 1040 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
help = ("Creates a Django app directory structure for the given app "
"name in the current directory or optionally in the given "
"directory.")
missing_args_message = "You must provide an application name."
def handle(self, **options):
app_name, target = options.pop('name'), options.pop('directory')
self.validate_name(app_name, "app")
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing "
"Python module and cannot be used as an app "
"name. Please try another name." % app_name)
super(Command, self).handle('app', app_name, target, **options)
| bsd-3-clause |
Micronaet/micronaet-accounting | l10n_it_duelist_report_aeroo/__init__.py | 2 | 1215 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright(c)2008-2010 SIA "KN dati".(http://kndati.lv) All Rights Reserved.
# General contacts <info@kndati.lv>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import duelist
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Mariusz1970/enigma2 | lib/python/Components/DiskInfo.py | 18 | 1058 | from GUIComponent import GUIComponent
from VariableText import VariableText
from os import statvfs
from enigma import eLabel
# TODO: Harddisk.py has similar functions, but only similar;
# fix this to use the same code
class DiskInfo(VariableText, GUIComponent):
FREE = 0
USED = 1
SIZE = 2
def __init__(self, path, type, update = True):
GUIComponent.__init__(self)
VariableText.__init__(self)
self.type = type
self.path = path
if update:
self.update()
def update(self):
try:
stat = statvfs(self.path)
except OSError:
return -1
if self.type == self.FREE:
try:
percent = '(' + str((100 * stat.f_bavail) // stat.f_blocks) + '%)'
free = stat.f_bfree * stat.f_bsize
if free < 10000000:
free = _("%d kB") % (free >> 10)
elif free < 10000000000:
free = _("%d MB") % (free >> 20)
else:
free = _("%d Gb") % (free >> 30)
self.setText(" ".join((free, percent, _("free diskspace"))))
except:
# occurs when f_blocks is 0 or a similar error
self.setText("-?-")
GUI_WIDGET = eLabel
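# A minimal usage sketch (hypothetical screen code; the mount path is an
# assumption):
#
# self["DiskInfo"] = DiskInfo("/media/hdd", DiskInfo.FREE)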
| gpl-2.0 |
huikyole/climate | test_smoke.py | 2 | 4433 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pkg_resources import VersionConflict, DistributionNotFound, \
require
from ocw.tests.test_local import create_netcdf_object
from ocw.data_source import local
from ocw import dataset_processor as dsp
import os
import six
if six.PY3:
DEPENDENCIES_FILE = 'deps_py3.txt'
else:
DEPENDENCIES_FILE = 'deps_py2.txt'
SUCCESS_MARK = '\033[92m' + u'\u2713' + '\033[0m'
FAILURE_MARK = '\033[91m' + u'\u274C' + '\033[0m'
def fail(prefix):
print(prefix + " " + FAILURE_MARK)
def success(prefix):
print(prefix + " " + SUCCESS_MARK)
def check_dependencies(file):
''' Verify all necessary dependencies are installed '''
for dep in file:
dep = dep.replace('\n', '')
# skip all comments and blank lines in dependency file
if '#' in dep or not dep:
continue
try:
require(dep)
success(dep)
except DistributionNotFound as df:
fail(dep)
dep = str(df).split(' ')[1][1:-1]
print('\n' + dep + ' dependency missing.')
print('Please install it using "pip/conda install ' + dep + '"')
fail("\nDependencies")
end()
except VersionConflict as vc:
fail(dep)
print("\nRequired version and installed version differ for the "
"following package:\n"
"Required version: " + dep)
dep_name = str(vc).split(' ')[0][1:] # First element is '('
dep_version = str(vc).split(' ')[1]
print("Installed version: " + dep_name + "==" + dep_version)
fail("\nDependencies")
end()
def check_dataset_loading():
''' Try loading test dataset '''
dataset = None
try:
file_path = create_netcdf_object()
dataset = local.load_file(file_path, variable_name='value')
except Exception as e:
fail("\nDataset loading")
print("The following error occured")
print(e)
end(dataset)
success("\nDataset loading")
return dataset
def check_some_dataset_functions(dataset):
''' Run a subset of dataset functions and check for any exception '''
try:
dataset.spatial_boundaries()
dataset.temporal_boundaries()
dataset.spatial_resolution()
except Exception as e:
fail("\nDataset functions")
print("Following error occured:")
print(str(e))
end(dataset)
success("\nDataset functions")
def check_some_dsp_functions(dataset):
'''
Run a subset of dataset processor functions and check for
any kind of exception.
'''
try:
dsp.temporal_rebin(dataset, 'annual')
dsp.ensemble([dataset])
except Exception as e:
fail("\nDataset processor functions")
print("Following error occured:")
print(str(e))
end()
finally:
os.remove(dataset.origin['path'])
success("\nDataset processor functions")
def end(dataset=None):
''' Exit program with status 1 '''
if dataset:
os.remove(dataset.origin['path'])
print('\033[91m' + "Some checks were unsuccessful")
print("Please Fix them and run the test again." + '\033[0m')
exit(1)
def main():
dep_file = open(DEPENDENCIES_FILE, 'r')
print("Checking installed dependencies\n")
check_dependencies(dep_file)
success("\nDependencies")
dataset = check_dataset_loading()
check_some_dataset_functions(dataset)
check_some_dsp_functions(dataset)
success("\nAll checks successfully completed")
return 0
if __name__ == '__main__':
main()
| apache-2.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/encodings/iso2022_jp.py | 816 | 1053 | #
# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_jp',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| bsd-2-clause |
robertostling/efselab | scripts/conll2tab.py | 1 | 1221 | # Script to convert CoNLL files with tag+morphology into the simple two-column
# format assumed by efselab.
#
# If only the tag is required, conversion can more easily be done like this:
#
# cut -f 2,4 file.conll >file.tab
"""
cat /home/corpora/SUC3.0/corpus/conll/blogs.conll /home/corpora/SUC3.0/corpus/conll/suc-train.conll | python3 conll2tab.py ne >../suc-data/suc-blogs-ne-train.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-dev.conll | python3 conll2tab.py ne >../suc-data/suc-ne-dev.tab
cat /home/corpora/SUC3.0/corpus/conll/suc-test.conll | python3 conll2tab.py ne >../suc-data/suc-ne-test.tab
"""
import sys
include_ne = 'ne' in sys.argv[1:]
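# Column layout assumed below (CoNLL-style, as in the SUC corpus export):
# fields[1] = word form, fields[2] = lemma, fields[3] = POS tag,
# fields[5] = morphological features, fields[10:12] = named-entity tag/type.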
for line in sys.stdin:
fields = line.rstrip('\n').split('\t')
if len(fields) >= 6:
word = fields[1]
pos = fields[3]
if pos == 'LE': pos = 'IN'
tag = pos+'|'+fields[5] if (fields[5] and fields[5] != '_') else pos
if include_ne and len(fields) >= 12:
ne = fields[10] if fields[11] == '_' else (
'%s-%s' % (fields[10], fields[11]))
lemma = fields[2]
print(word+'\t'+lemma+'\t'+tag+'\t'+ne)
else:
print(word+'\t'+tag)
else:
print()
| gpl-3.0 |
procangroup/edx-platform | lms/djangoapps/django_comment_client/tests/test_utils.py | 3 | 80319 | # -*- coding: utf-8 -*-
import datetime
import json
import ddt
import mock
import pytest
from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase
from mock import Mock, patch
from nose.plugins.attrib import attr
from pytz import UTC
from six import text_type
import django_comment_client.utils as utils
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.tabs import get_course_tab_list
from courseware.tests.factories import InstructorFactory
from django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from django_comment_client.tests.factories import RoleFactory
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import config_course_discussions, topic_name_to_id
from django_comment_common.models import (
CourseDiscussionSettings,
ForumsConfig,
assign_role
)
from django_comment_common.utils import (
get_course_discussion_settings,
seed_permissions_roles,
set_course_discussion_settings
)
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
from lms.lib.comment_client.utils import CommentClientMaintenanceError, perform_request
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.course_groups import cohorts
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory, config_course_cohorts
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from student.roles import CourseStaffRole
from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, ToyCourseFactory
@attr(shard=1)
class DictionaryTestCase(TestCase):
def test_extract(self):
d = {'cats': 'meow', 'dogs': 'woof'}
k = ['cats', 'dogs', 'hamsters']
expected = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
self.assertEqual(utils.extract(d, k), expected)
def test_strip_none(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': None}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_none(d), expected)
def test_strip_blank(self):
d = {'cats': 'meow', 'dogs': 'woof', 'hamsters': ' ', 'yetis': ''}
expected = {'cats': 'meow', 'dogs': 'woof'}
self.assertEqual(utils.strip_blank(d), expected)
def test_merge_dict(self):
d1 = {'cats': 'meow', 'dogs': 'woof'}
d2 = {'lions': 'roar', 'ducks': 'quack'}
expected = {'cats': 'meow', 'dogs': 'woof', 'lions': 'roar', 'ducks': 'quack'}
self.assertEqual(utils.merge_dict(d1, d2), expected)
@attr(shard=1)
@pytest.mark.django111_expected_failure
class AccessUtilsTestCase(ModuleStoreTestCase):
"""
Base testcase class for access and roles for the
comment client service integration
"""
CREATE_USER = False
def setUp(self):
super(AccessUtilsTestCase, self).setUp()
self.course = CourseFactory.create()
self.course_id = self.course.id
self.student_role = RoleFactory(name='Student', course_id=self.course_id)
self.moderator_role = RoleFactory(name='Moderator', course_id=self.course_id)
self.community_ta_role = RoleFactory(name='Community TA', course_id=self.course_id)
self.student1 = UserFactory(username='student', email='student@edx.org')
self.student1_enrollment = CourseEnrollmentFactory(user=self.student1)
self.student_role.users.add(self.student1)
self.student2 = UserFactory(username='student2', email='student2@edx.org')
self.student2_enrollment = CourseEnrollmentFactory(user=self.student2)
self.moderator = UserFactory(username='moderator', email='staff@edx.org', is_staff=True)
self.moderator_enrollment = CourseEnrollmentFactory(user=self.moderator)
self.moderator_role.users.add(self.moderator)
self.community_ta1 = UserFactory(username='community_ta1', email='community_ta1@edx.org')
self.community_ta_role.users.add(self.community_ta1)
self.community_ta2 = UserFactory(username='community_ta2', email='community_ta2@edx.org')
self.community_ta_role.users.add(self.community_ta2)
self.course_staff = UserFactory(username='course_staff', email='course_staff@edx.org')
CourseStaffRole(self.course_id).add_users(self.course_staff)
def test_get_role_ids(self):
ret = utils.get_role_ids(self.course_id)
expected = {u'Moderator': [3], u'Community TA': [4, 5]}
self.assertEqual(ret, expected)
def test_has_discussion_privileges(self):
self.assertFalse(utils.has_discussion_privileges(self.student1, self.course_id))
self.assertFalse(utils.has_discussion_privileges(self.student2, self.course_id))
self.assertFalse(utils.has_discussion_privileges(self.course_staff, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.moderator, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.community_ta1, self.course_id))
self.assertTrue(utils.has_discussion_privileges(self.community_ta2, self.course_id))
def test_has_forum_access(self):
ret = utils.has_forum_access('student', self.course_id, 'Student')
self.assertTrue(ret)
ret = utils.has_forum_access('not_a_student', self.course_id, 'Student')
self.assertFalse(ret)
ret = utils.has_forum_access('student', self.course_id, 'NotARole')
self.assertFalse(ret)
@ddt.ddt
@attr(shard=1)
class CoursewareContextTestCase(ModuleStoreTestCase):
"""
Base testcase class for courseware context for the
comment client service integration
"""
def setUp(self):
super(CoursewareContextTestCase, self).setUp()
self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
self.discussion1 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion1",
discussion_category="Chapter",
discussion_target="Discussion 1"
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion2",
discussion_category="Chapter / Section / Subsection",
discussion_target="Discussion 2"
)
def test_empty(self):
utils.add_courseware_context([], self.course, self.user)
def test_missing_commentable_id(self):
orig = {"commentable_id": "non-inline"}
modified = dict(orig)
utils.add_courseware_context([modified], self.course, self.user)
self.assertEqual(modified, orig)
def test_basic(self):
threads = [
{"commentable_id": self.discussion1.discussion_id},
{"commentable_id": self.discussion2.discussion_id}
]
utils.add_courseware_context(threads, self.course, self.user)
def assertThreadCorrect(thread, discussion, expected_title): # pylint: disable=invalid-name
"""Asserts that the given thread has the expected set of properties"""
self.assertEqual(
set(thread.keys()),
set(["commentable_id", "courseware_url", "courseware_title"])
)
self.assertEqual(
thread.get("courseware_url"),
reverse(
"jump_to",
kwargs={
"course_id": text_type(self.course.id),
"location": text_type(discussion.location)
}
)
)
self.assertEqual(thread.get("courseware_title"), expected_title)
assertThreadCorrect(threads[0], self.discussion1, "Chapter / Discussion 1")
assertThreadCorrect(threads[1], self.discussion2, "Subsection / Discussion 2")
def test_empty_discussion_subcategory_title(self):
"""
Test that for empty subcategory inline discussion modules,
the divider " / " is not rendered on a post or inline discussion topic label.
"""
discussion = ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion",
discussion_category="Chapter",
discussion_target="" # discussion-subcategory
)
thread = {"commentable_id": discussion.discussion_id}
utils.add_courseware_context([thread], self.course, self.user)
self.assertNotIn('/', thread.get("courseware_title"))
@ddt.data((ModuleStoreEnum.Type.mongo, 2), (ModuleStoreEnum.Type.split, 1))
@ddt.unpack
def test_get_accessible_discussion_xblocks(self, modulestore_type, expected_discussion_xblocks):
"""
Tests that the accessible discussion xblocks having no parents do not get fetched for split modulestore.
"""
course = CourseFactory.create(default_store=modulestore_type)
# Create a discussion xblock.
test_discussion = self.store.create_child(self.user.id, course.location, 'discussion', 'test_discussion')
# Assert that created discussion xblock is not an orphan.
self.assertNotIn(test_discussion.location, self.store.get_orphans(course.id))
# Assert that there is only one discussion xblock in the course at the moment.
self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), 1)
# Add an orphan discussion xblock to that course
orphan = course.id.make_usage_key('discussion', 'orphan_discussion')
self.store.create_item(self.user.id, orphan.course_key, orphan.block_type, block_id=orphan.block_id)
# Assert that the discussion xblock is an orphan.
self.assertIn(orphan, self.store.get_orphans(course.id))
self.assertEqual(len(utils.get_accessible_discussion_xblocks(course, self.user)), expected_discussion_xblocks)
@attr(shard=3)
class CachedDiscussionIdMapTestCase(ModuleStoreTestCase):
"""
Tests that using the cache of discussion id mappings has the same behavior as searching through the course.
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(CachedDiscussionIdMapTestCase, self).setUp()
self.course = CourseFactory.create(org='TestX', number='101', display_name='Test Course')
self.discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id',
discussion_category='Chapter',
discussion_target='Discussion 1'
)
self.discussion2 = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='test_discussion_id_2',
discussion_category='Chapter 2',
discussion_target='Discussion 2'
)
self.private_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='private_discussion_id',
discussion_category='Chapter 3',
discussion_target='Beta Testing',
visible_to_staff_only=True
)
self.bad_discussion = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='bad_discussion_id',
discussion_category=None,
discussion_target=None
)
def test_cache_returns_correct_key(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
self.assertEqual(usage_key, self.discussion.location)
def test_cache_returns_none_if_id_is_not_present(self):
usage_key = utils.get_cached_discussion_key(self.course.id, 'bogus_id')
self.assertIsNone(usage_key)
def test_cache_raises_exception_if_course_structure_not_cached(self):
CourseStructure.objects.all().delete()
with self.assertRaises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_cache_raises_exception_if_discussion_id_not_cached(self):
cache = CourseStructure.objects.get(course_id=self.course.id)
cache.discussion_id_map_json = None
cache.save()
with self.assertRaises(utils.DiscussionIdMapIsNotCached):
utils.get_cached_discussion_key(self.course.id, 'test_discussion_id')
def test_xblock_does_not_have_required_keys(self):
self.assertTrue(utils.has_required_keys(self.discussion))
self.assertFalse(utils.has_required_keys(self.bad_discussion))
def verify_discussion_metadata(self):
"""Retrieves the metadata for self.discussion and self.discussion2 and verifies that it is correct"""
metadata = utils.get_cached_discussion_id_map(
self.course,
['test_discussion_id', 'test_discussion_id_2'],
self.user
)
discussion1 = metadata[self.discussion.discussion_id]
discussion2 = metadata[self.discussion2.discussion_id]
self.assertEqual(discussion1['location'], self.discussion.location)
self.assertEqual(discussion1['title'], 'Chapter / Discussion 1')
self.assertEqual(discussion2['location'], self.discussion2.location)
self.assertEqual(discussion2['title'], 'Chapter 2 / Discussion 2')
def test_get_discussion_id_map_from_cache(self):
self.verify_discussion_metadata()
def test_get_discussion_id_map_without_cache(self):
CourseStructure.objects.all().delete()
self.verify_discussion_metadata()
def test_get_missing_discussion_id_map_from_cache(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bogus_id'], self.user)
self.assertEqual(metadata, {})
def test_get_discussion_id_map_from_cache_without_access(self):
user = UserFactory.create()
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], self.user)
self.assertEqual(metadata['private_discussion_id']['title'], 'Chapter 3 / Beta Testing')
metadata = utils.get_cached_discussion_id_map(self.course, ['private_discussion_id'], user)
self.assertEqual(metadata, {})
def test_get_bad_discussion_id(self):
metadata = utils.get_cached_discussion_id_map(self.course, ['bad_discussion_id'], self.user)
self.assertEqual(metadata, {})
def test_discussion_id_accessible(self):
self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'test_discussion_id'))
def test_bad_discussion_id_not_accessible(self):
self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bad_discussion_id'))
def test_missing_discussion_id_not_accessible(self):
self.assertFalse(utils.discussion_category_id_access(self.course, self.user, 'bogus_id'))
def test_discussion_id_not_accessible_without_access(self):
user = UserFactory.create()
self.assertTrue(utils.discussion_category_id_access(self.course, self.user, 'private_discussion_id'))
self.assertFalse(utils.discussion_category_id_access(self.course, user, 'private_discussion_id'))
class CategoryMapTestMixin(object):
"""
Provides functionality for classes that test
`get_discussion_category_map`.
"""
def assert_category_map_equals(self, expected, requesting_user=None):
"""
Call `get_discussion_category_map`, and verify that it returns
what is expected.
"""
self.assertEqual(
utils.get_discussion_category_map(self.course, requesting_user or self.user),
expected
)
@attr(shard=1)
class CategoryMapTestCase(CategoryMapTestMixin, ModuleStoreTestCase):
"""
Base testcase class for discussion categories for the
comment client service integration
"""
def setUp(self):
super(CategoryMapTestCase, self).setUp()
self.course = CourseFactory.create(
org="TestX", number="101", display_name="Test Course",
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime.datetime(2012, 2, 3, tzinfo=UTC)
)
# Courses get a default discussion topic on creation, so remove it
self.course.discussion_topics = {}
self.discussion_num = 0
self.instructor = InstructorFactory(course_key=self.course.id)
self.maxDiff = None # pylint: disable=invalid-name
self.later = datetime.datetime(2050, 1, 1, tzinfo=UTC)
def create_discussion(self, discussion_category, discussion_target, **kwargs):
self.discussion_num += 1
return ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id="discussion{}".format(self.discussion_num),
discussion_category=discussion_category,
discussion_target=discussion_target,
**kwargs
)
def assert_category_map_equals(self, expected, divided_only_if_explicit=False, exclude_unstarted=True): # pylint: disable=arguments-differ
"""
Asserts the expected map with the map returned by get_discussion_category_map method.
"""
self.assertEqual(
utils.get_discussion_category_map(
self.course, self.instructor, divided_only_if_explicit, exclude_unstarted
),
expected
)
def test_empty(self):
self.assert_category_map_equals({"entries": {}, "subcategories": {}, "children": []})
def test_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
def check_cohorted_topics(expected_ids): # pylint: disable=missing-docstring
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "Topic A", "is_divided": "Topic_A" in expected_ids},
"Topic B": {"id": "Topic_B", "sort_key": "Topic B", "is_divided": "Topic_B" in expected_ids},
"Topic C": {"id": "Topic_C", "sort_key": "Topic C", "is_divided": "Topic_C" in expected_ids},
},
"subcategories": {},
"children": [("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY), ("Topic C", TYPE_ENTRY)]
}
)
check_cohorted_topics([]) # default (empty) cohort config
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_cohorted_topics([])
set_discussion_division_settings(self.course.id, enable_cohorts=True)
check_cohorted_topics([])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_B", "Topic_C"]
)
check_cohorted_topics(["Topic_B", "Topic_C"])
set_discussion_division_settings(
self.course.id,
enable_cohorts=True,
divided_discussions=["Topic_A", "Some_Other_Topic"]
)
check_cohorted_topics(["Topic_A"])
# unlikely case, but make sure it works.
set_discussion_division_settings(
self.course.id,
enable_cohorts=False,
divided_discussions=["Topic_A"]
)
check_cohorted_topics([])
def test_single_inline(self):
self.create_discussion("Chapter", "Discussion")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_with_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": True,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_inline_without_always_divide_inline_discussion_flag(self):
self.create_discussion("Chapter", "Discussion")
set_discussion_division_settings(self.course.id, enable_cohorts=True)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True
)
def test_get_unstarted_discussion_xblocks(self):
self.create_discussion("Chapter 1", "Discussion 1", start=self.later)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
"start_date": self.later
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)],
"start_date": self.later,
"sort_key": "Chapter 1"
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY)]
},
divided_only_if_explicit=True,
exclude_unstarted=False
)
def test_tree(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
def check_divided(is_divided):
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": is_divided,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": is_divided,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
# empty / default config
check_divided(False)
# explicitly disabled cohorting
set_discussion_division_settings(self.course.id, enable_cohorts=False)
check_divided(False)
# explicitly enable courses divided by Cohort with inline discussions also divided.
set_discussion_division_settings(self.course.id, enable_cohorts=True, always_divide_inline_discussions=True)
check_divided(True)
def test_tree_with_duplicate_targets(self):
self.create_discussion("Chapter 1", "Discussion A")
self.create_discussion("Chapter 1", "Discussion B")
self.create_discussion("Chapter 1", "Discussion A") # duplicate
self.create_discussion("Chapter 1", "Discussion A") # another duplicate
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion") # duplicate
category_map = utils.get_discussion_category_map(self.course, self.user)
chapter1 = category_map["subcategories"]["Chapter 1"]
chapter1_discussions = set(["Discussion A", "Discussion B", "Discussion A (1)", "Discussion A (2)"])
chapter1_discussions_with_types = set([("Discussion A", TYPE_ENTRY), ("Discussion B", TYPE_ENTRY),
("Discussion A (1)", TYPE_ENTRY), ("Discussion A (2)", TYPE_ENTRY)])
self.assertEqual(set(chapter1["children"]), chapter1_discussions_with_types)
self.assertEqual(set(chapter1["entries"].keys()), chapter1_discussions)
chapter2 = category_map["subcategories"]["Chapter 2"]
subsection1 = chapter2["subcategories"]["Section 1"]["subcategories"]["Subsection 1"]
subsection1_discussions = set(["Discussion", "Discussion (1)"])
subsection1_discussions_with_types = set([("Discussion", TYPE_ENTRY), ("Discussion (1)", TYPE_ENTRY)])
self.assertEqual(set(subsection1["children"]), subsection1_discussions_with_types)
self.assertEqual(set(subsection1["entries"].keys()), subsection1_discussions)
def test_start_date_filter(self):
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", u"Discussion 2 обсуждение", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
self.assertFalse(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY)]
}
)
def test_self_paced_start_date_filter(self):
self.course.self_paced = True
now = datetime.datetime.now()
self.create_discussion("Chapter 1", "Discussion 1", start=now)
self.create_discussion("Chapter 1", "Discussion 2", start=self.later)
self.create_discussion("Chapter 2", "Discussion", start=now)
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion", start=self.later)
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion", start=self.later)
self.create_discussion("Chapter 3 / Section 1", "Discussion", start=self.later)
self.assertTrue(self.course.self_paced)
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter 1": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter 2": {
"entries": {
"Discussion": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {
"Section 1": {
"entries": {},
"subcategories": {
"Subsection 1": {
"entries": {
"Discussion": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
},
"Subsection 2": {
"entries": {
"Discussion": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Subsection 1", TYPE_SUBCATEGORY), ("Subsection 2", TYPE_SUBCATEGORY)]
}
},
"children": [("Discussion", TYPE_ENTRY), ("Section 1", TYPE_SUBCATEGORY)]
},
"Chapter 3": {
"entries": {},
"subcategories": {
"Section 1": {
"entries": {
"Discussion": {
"id": "discussion6",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Section 1", TYPE_SUBCATEGORY)]
}
},
"children": [("Chapter 1", TYPE_SUBCATEGORY), ("Chapter 2", TYPE_SUBCATEGORY),
("Chapter 3", TYPE_SUBCATEGORY)]
}
)
def test_sort_inline_explicit(self):
self.create_discussion("Chapter", "Discussion 1", sort_key="D")
self.create_discussion("Chapter", "Discussion 2", sort_key="A")
self.create_discussion("Chapter", "Discussion 3", sort_key="E")
self.create_discussion("Chapter", "Discussion 4", sort_key="C")
self.create_discussion("Chapter", "Discussion 5", sort_key="B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion 1": {
"id": "discussion1",
"sort_key": "D",
"is_divided": False,
},
"Discussion 2": {
"id": "discussion2",
"sort_key": "A",
"is_divided": False,
},
"Discussion 3": {
"id": "discussion3",
"sort_key": "E",
"is_divided": False,
},
"Discussion 4": {
"id": "discussion4",
"sort_key": "C",
"is_divided": False,
},
"Discussion 5": {
"id": "discussion5",
"sort_key": "B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion 2", TYPE_ENTRY),
("Discussion 5", TYPE_ENTRY),
("Discussion 4", TYPE_ENTRY),
("Discussion 1", TYPE_ENTRY),
("Discussion 3", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_configured_topics_explicit(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A", "sort_key": "B"},
"Topic B": {"id": "Topic_B", "sort_key": "C"},
"Topic C": {"id": "Topic_C", "sort_key": "A"}
}
self.assert_category_map_equals(
{
"entries": {
"Topic A": {"id": "Topic_A", "sort_key": "B", "is_divided": False},
"Topic B": {"id": "Topic_B", "sort_key": "C", "is_divided": False},
"Topic C": {"id": "Topic_C", "sort_key": "A", "is_divided": False},
},
"subcategories": {},
"children": [("Topic C", TYPE_ENTRY), ("Topic A", TYPE_ENTRY), ("Topic B", TYPE_ENTRY)]
}
)
def test_sort_alpha(self):
self.course.discussion_sort_alpha = True
self.create_discussion("Chapter", "Discussion D")
self.create_discussion("Chapter", "Discussion A")
self.create_discussion("Chapter", "Discussion E")
self.create_discussion("Chapter", "Discussion C")
self.create_discussion("Chapter", "Discussion B")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter": {
"entries": {
"Discussion D": {
"id": "discussion1",
"sort_key": "Discussion D",
"is_divided": False,
},
"Discussion A": {
"id": "discussion2",
"sort_key": "Discussion A",
"is_divided": False,
},
"Discussion E": {
"id": "discussion3",
"sort_key": "Discussion E",
"is_divided": False,
},
"Discussion C": {
"id": "discussion4",
"sort_key": "Discussion C",
"is_divided": False,
},
"Discussion B": {
"id": "discussion5",
"sort_key": "Discussion B",
"is_divided": False,
}
},
"subcategories": {},
"children": [
("Discussion A", TYPE_ENTRY),
("Discussion B", TYPE_ENTRY),
("Discussion C", TYPE_ENTRY),
("Discussion D", TYPE_ENTRY),
("Discussion E", TYPE_ENTRY)
]
}
},
"children": [("Chapter", TYPE_SUBCATEGORY)]
}
)
def test_sort_intermediates(self):
self.create_discussion("Chapter B", "Discussion 2")
self.create_discussion("Chapter C", "Discussion")
self.create_discussion("Chapter A", "Discussion 1")
self.create_discussion("Chapter B", "Discussion 1")
self.create_discussion("Chapter A", "Discussion 2")
self.assert_category_map_equals(
{
"entries": {},
"subcategories": {
"Chapter A": {
"entries": {
"Discussion 1": {
"id": "discussion3",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion5",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter B": {
"entries": {
"Discussion 1": {
"id": "discussion4",
"sort_key": None,
"is_divided": False,
},
"Discussion 2": {
"id": "discussion1",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion 1", TYPE_ENTRY), ("Discussion 2", TYPE_ENTRY)]
},
"Chapter C": {
"entries": {
"Discussion": {
"id": "discussion2",
"sort_key": None,
"is_divided": False,
}
},
"subcategories": {},
"children": [("Discussion", TYPE_ENTRY)]
}
},
"children": [("Chapter A", TYPE_SUBCATEGORY), ("Chapter B", TYPE_SUBCATEGORY),
("Chapter C", TYPE_SUBCATEGORY)]
}
)
def test_ids_empty(self):
self.assertEqual(utils.get_discussion_categories_ids(self.course, self.user), [])
def test_ids_configured_topics(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C"]
)
def test_ids_inline(self):
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 1", "Discussion 2")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 2", "Discussion")
self.create_discussion("Chapter 3 / Section 1", "Discussion")
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["discussion1", "discussion2", "discussion3", "discussion4", "discussion5", "discussion6"]
)
def test_ids_mixed(self):
self.course.discussion_topics = {
"Topic A": {"id": "Topic_A"},
"Topic B": {"id": "Topic_B"},
"Topic C": {"id": "Topic_C"}
}
self.create_discussion("Chapter 1", "Discussion 1")
self.create_discussion("Chapter 2", "Discussion")
self.create_discussion("Chapter 2 / Section 1 / Subsection 1", "Discussion")
self.assertItemsEqual(
utils.get_discussion_categories_ids(self.course, self.user),
["Topic_A", "Topic_B", "Topic_C", "discussion1", "discussion2", "discussion3"]
)
@attr(shard=1)
class ContentGroupCategoryMapTestCase(CategoryMapTestMixin, ContentGroupTestCase):
"""
Tests `get_discussion_category_map` on discussion xblocks which are
only visible to some content groups.
"""
def test_staff_user(self):
"""
Verify that the staff user can access the alpha, beta, and
global discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Beta', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.staff_user
)
def test_alpha_user(self):
"""
Verify that the alpha user can access the alpha and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Alpha', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Alpha': {
'sort_key': None,
'is_divided': False,
'id': 'alpha_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.alpha_user
)
def test_beta_user(self):
"""
Verify that the beta user can access the beta and global
discussion topics.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Beta', 'entry'),
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Beta': {
'sort_key': None,
'is_divided': False,
'id': 'beta_group_discussion'
},
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.beta_user
)
def test_non_cohorted_user(self):
"""
Verify that the non-cohorted user can access the global
discussion topic.
"""
self.assert_category_map_equals(
{
'subcategories': {
'Week 1': {
'subcategories': {},
'children': [
('Visible to Everyone', 'entry')
],
'entries': {
'Visible to Everyone': {
'sort_key': None,
'is_divided': False,
'id': 'global_group_discussion'
}
}
}
},
'children': [('General', 'entry'), ('Week 1', 'subcategory')],
'entries': {
'General': {
'sort_key': 'General',
'is_divided': False,
'id': 'i4x-org-number-course-run'
}
}
},
requesting_user=self.non_cohorted_user
)
class JsonResponseTestCase(TestCase, UnicodeTestMixin):
def _test_unicode_data(self, text):
response = utils.JsonResponse(text)
reparsed = json.loads(response.content)
self.assertEqual(reparsed, text)
class DiscussionTabTestCase(ModuleStoreTestCase):
""" Test visibility of the discussion tab. """
def setUp(self):
super(DiscussionTabTestCase, self).setUp()
self.course = CourseFactory.create()
self.enrolled_user = UserFactory.create()
self.staff_user = AdminFactory.create()
CourseEnrollmentFactory.create(user=self.enrolled_user, course_id=self.course.id)
self.unenrolled_user = UserFactory.create()
def discussion_tab_present(self, user):
""" Returns true if the user has access to the discussion tab. """
request = RequestFactory().request()
request.user = user
all_tabs = get_course_tab_list(request, self.course)
return any(tab.type == 'discussion' for tab in all_tabs)
def test_tab_access(self):
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': True}):
self.assertTrue(self.discussion_tab_present(self.staff_user))
self.assertTrue(self.discussion_tab_present(self.enrolled_user))
self.assertFalse(self.discussion_tab_present(self.unenrolled_user))
@mock.patch('ccx.overrides.get_current_ccx')
def test_tab_settings(self, mock_get_ccx):
mock_get_ccx.return_value = True
with self.settings(FEATURES={'ENABLE_DISCUSSION_SERVICE': False}):
self.assertFalse(self.discussion_tab_present(self.enrolled_user))
with self.settings(FEATURES={'CUSTOM_COURSES_EDX': True}):
self.assertFalse(self.discussion_tab_present(self.enrolled_user))
class IsCommentableDividedTestCase(ModuleStoreTestCase):
"""
Test the is_commentable_divided function.
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Make sure that course is reloaded every time--clear out the modulestore.
"""
super(IsCommentableDividedTestCase, self).setUp()
self.toy_course_key = ToyCourseFactory.create().id
def test_is_commentable_divided(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
def to_id(name):
"""Helper for topic_name_to_id that uses course."""
return topic_name_to_id(course, name)
# no topics
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course doesn't even have a 'General' topic"
)
# not cohorted
config_course_cohorts(course, is_cohorted=False)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course isn't cohorted"
)
# cohorted, but top level topics aren't
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, discussion_topics=["General", "Feedback"])
self.assertTrue(cohorts.is_course_cohorted(course.id))
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
# cohorted, including "Feedback" top-level topics aren't
config_course_cohorts(
course,
is_cohorted=True
)
config_course_discussions(course, discussion_topics=["General", "Feedback"], divided_discussions=["Feedback"])
self.assertTrue(cohorts.is_course_cohorted(course.id))
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("General")),
"Course is cohorted, but 'General' isn't."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("Feedback")),
"Feedback was listed as cohorted. Should be."
)
def test_is_commentable_divided_inline_discussion(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
def to_id(name): # pylint: disable=missing-docstring
return topic_name_to_id(course, name)
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"]
)
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("random")),
"By default, Non-top-level discussions are not cohorted in a cohorted courses."
)
# if always_divide_inline_discussions is set to False, non-top-level discussions are
# never divided unless they are explicitly listed in divided_discussions
config_course_cohorts(
course,
is_cohorted=True,
)
config_course_discussions(
course,
discussion_topics=["General", "Feedback"],
divided_discussions=["Feedback", "random_inline"],
always_divide_inline_discussions=False
)
self.assertFalse(
utils.is_commentable_divided(course.id, to_id("random")),
"Non-top-level discussion is not cohorted if always_divide_inline_discussions is False."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("random_inline")),
"If always_divide_inline_discussions set to False, Non-top-level discussion is "
"cohorted if explicitly set in cohorted_discussions."
)
self.assertTrue(
utils.is_commentable_divided(course.id, to_id("Feedback")),
"If always_divide_inline_discussions set to False, top-level discussion are not affected."
)
def test_is_commentable_divided_team(self):
course = modulestore().get_course(self.toy_course_key)
self.assertFalse(cohorts.is_course_cohorted(course.id))
config_course_cohorts(course, is_cohorted=True)
config_course_discussions(course, always_divide_inline_discussions=True)
team = CourseTeamFactory(course_id=course.id)
# Verify that team discussions are not cohorted, but other discussions are
# if "always cohort inline discussions" is set to true.
self.assertFalse(utils.is_commentable_divided(course.id, team.discussion_topic_id))
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
def test_is_commentable_divided_cohorts(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.NONE,
)
# Although Cohorts are enabled, discussion division is explicitly disabled.
self.assertFalse(utils.is_commentable_divided(course.id, "random"))
# Now set the discussion division scheme.
set_discussion_division_settings(
course.id,
enable_cohorts=True,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.COHORT,
)
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
def test_is_commentable_divided_enrollment_track(self):
course = modulestore().get_course(self.toy_course_key)
set_discussion_division_settings(
course.id,
divided_discussions=[],
always_divide_inline_discussions=True,
division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK,
)
# Although division scheme is set to ENROLLMENT_TRACK, divided returns
# False because there is only a single enrollment mode.
self.assertFalse(utils.is_commentable_divided(course.id, "random"))
# Now create 2 explicit course modes.
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=course.id, mode_slug=CourseMode.VERIFIED)
self.assertTrue(utils.is_commentable_divided(course.id, "random"))
@attr(shard=1)
class GroupIdForUserTestCase(ModuleStoreTestCase):
""" Test the get_group_id_for_user method. """
def setUp(self):
super(GroupIdForUserTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_user = UserFactory.create()
CourseEnrollmentFactory.create(
mode=CourseMode.VERIFIED, user=self.test_user, course_id=self.course.id
)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.test_user]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.NONE, course_discussion_settings.division_scheme)
self.assertIsNone(utils.get_group_id_for_user(self.test_user, course_discussion_settings))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.COHORT, course_discussion_settings.division_scheme)
self.assertEqual(
self.test_cohort.id,
utils.get_group_id_for_user(self.test_user, course_discussion_settings)
)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(CourseDiscussionSettings.ENROLLMENT_TRACK, course_discussion_settings.division_scheme)
self.assertEqual(
-2, # Verified has group ID 2, and we negate that value to ensure unique IDs
utils.get_group_id_for_user(self.test_user, course_discussion_settings)
)
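# Editor's note (illustrative, not part of the original suite): enrollment-track
# group ids are the negated partition group ids, so they can never collide with
# cohort ids, which are positive database keys. A sketch of the convention:
#
# def enrollment_track_group_id(partition_group_id):
#     return -1 * partition_group_id  # e.g. the verified group 2 maps to -2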
@attr(shard=1)
class CourseDiscussionDivisionEnabledTestCase(ModuleStoreTestCase):
""" Test the course_discussion_division_enabled and available_division_schemes methods. """
def setUp(self):
super(CourseDiscussionDivisionEnabledTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
self.test_cohort = CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertFalse(utils.course_discussion_division_enabled(course_discussion_settings))
self.assertEqual([], utils.available_division_schemes(self.course.id))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=False, division_scheme=CourseDiscussionSettings.COHORT
)
# Because cohorts are disabled, discussion division is not enabled.
self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([], utils.available_division_schemes(self.course.id))
# Now enable cohorts, which will cause discussions to be divided.
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([CourseDiscussionSettings.COHORT], utils.available_division_schemes(self.course.id))
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
# Only a single enrollment track exists, so discussion division is not enabled.
self.assertFalse(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([], utils.available_division_schemes(self.course.id))
# Now create a second CourseMode, which will cause discussions to be divided.
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.assertTrue(utils.course_discussion_division_enabled(get_course_discussion_settings(self.course.id)))
self.assertEqual([CourseDiscussionSettings.ENROLLMENT_TRACK], utils.available_division_schemes(self.course.id))
@attr(shard=1)
class GroupNameTestCase(ModuleStoreTestCase):
""" Test the get_group_name and get_group_names_by_id methods. """
def setUp(self):
super(GroupNameTestCase, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)
CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)
self.test_cohort_1 = CohortFactory(
course_id=self.course.id,
name='Cohort 1',
users=[]
)
self.test_cohort_2 = CohortFactory(
course_id=self.course.id,
name='Cohort 2',
users=[]
)
def test_discussion_division_disabled(self):
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual({}, utils.get_group_names_by_id(course_discussion_settings))
self.assertIsNone(utils.get_group_name(-1000, course_discussion_settings))
def test_discussion_division_by_cohort(self):
set_discussion_division_settings(
self.course.id, enable_cohorts=True, division_scheme=CourseDiscussionSettings.COHORT
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(
{
self.test_cohort_1.id: self.test_cohort_1.name,
self.test_cohort_2.id: self.test_cohort_2.name
},
utils.get_group_names_by_id(course_discussion_settings)
)
self.assertEqual(
self.test_cohort_2.name,
utils.get_group_name(self.test_cohort_2.id, course_discussion_settings)
)
# Test also with a group_id that doesn't exist.
self.assertIsNone(
utils.get_group_name(-1000, course_discussion_settings)
)
def test_discussion_division_by_enrollment_track(self):
set_discussion_division_settings(
self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK
)
course_discussion_settings = get_course_discussion_settings(self.course.id)
self.assertEqual(
{
-1: "audit course",
-2: "verified course"
},
utils.get_group_names_by_id(course_discussion_settings)
)
self.assertEqual(
"verified course",
utils.get_group_name(-2, course_discussion_settings)
)
# Test also with a group_id that doesn't exist.
self.assertIsNone(
utils.get_group_name(-1000, course_discussion_settings)
)
class PermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions)"""
def test_get_ability(self):
content = {}
content['user_id'] = '1'
content['type'] = 'thread'
user = mock.Mock()
user.id = 1
with mock.patch('django_comment_client.utils.check_permissions_by_view') as check_perm:
check_perm.return_value = True
self.assertEqual(utils.get_ability(None, content, user), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': False,
'can_report': False
})
content['user_id'] = '2'
self.assertEqual(utils.get_ability(None, content, user), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': True,
'can_report': True
})
def test_get_ability_with_global_staff(self):
"""
Tests that global staff have the right to report another user's post,
regardless of whether they are enrolled in the course.
"""
content = {'user_id': '1', 'type': 'thread'}
with mock.patch('django_comment_client.utils.check_permissions_by_view') as check_perm:
# check_permissions_by_view returns false because user is not enrolled in the course.
check_perm.return_value = False
global_staff = UserFactory(username='global_staff', email='global_staff@edx.org', is_staff=True)
self.assertEqual(utils.get_ability(None, content, global_staff), {
'editable': False,
'can_reply': False,
'can_delete': False,
'can_openclose': False,
'can_vote': False,
'can_report': True
})
def test_is_content_authored_by(self):
content = {}
user = mock.Mock()
user.id = 1
# strict equality checking
content['user_id'] = 1
self.assertTrue(utils.is_content_authored_by(content, user))
# cast from string to int
content['user_id'] = '1'
self.assertTrue(utils.is_content_authored_by(content, user))
# strict equality checking, fails
content['user_id'] = 2
self.assertFalse(utils.is_content_authored_by(content, user))
# cast from string to int, fails
content['user_id'] = 'string'
self.assertFalse(utils.is_content_authored_by(content, user))
# content has no known author
del content['user_id']
self.assertFalse(utils.is_content_authored_by(content, user))
class GroupModeratorPermissionsTestCase(ModuleStoreTestCase):
"""Test utils functionality related to forums "abilities" (permissions) for group moderators"""
def _check_condition(user, condition, content):
"""
Mocks the _check_condition method, because is_open and is_team_member_if_applicable must
both be true in order to interact with a thread or comment.
"""
return condition in ('is_open', 'is_team_member_if_applicable')
def setUp(self):
super(GroupModeratorPermissionsTestCase, self).setUp()
# Create course, seed permissions roles, and create team
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
verified_coursemode = CourseModeFactory.create(
course_id=self.course.id,
mode_slug=CourseMode.VERIFIED
)
audit_coursemode = CourseModeFactory.create(
course_id=self.course.id,
mode_slug=CourseMode.AUDIT
)
# Create four users: group_moderator (who is within the verified enrollment track and in the cohort),
# verified_user (who is in the verified enrollment track but not the cohort),
# cohorted_user (who is in the cohort but not the verified enrollment track),
# and plain_user (who is neither in the cohort nor the verified enrollment track)
self.group_moderator = UserFactory(username='group_moderator', email='group_moderator@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.group_moderator,
mode=verified_coursemode
)
self.verified_user = UserFactory(username='verified', email='verified@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.verified_user,
mode=verified_coursemode
)
self.cohorted_user = UserFactory(username='cohort', email='cohort@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.cohorted_user,
mode=audit_coursemode
)
self.plain_user = UserFactory(username='plain', email='plain@edx.org')
CourseEnrollmentFactory(
course_id=self.course.id,
user=self.plain_user,
mode=audit_coursemode
)
CohortFactory(
course_id=self.course.id,
name='Test Cohort',
users=[self.group_moderator, self.cohorted_user]
)
# Give group moderator permissions to group_moderator
assign_role(self.course.id, self.group_moderator, 'Group Moderator')
@mock.patch('django_comment_client.permissions._check_condition', side_effect=_check_condition)
def test_not_divided(self, check_condition_function):
"""
Group moderator should not have moderator permissions if the discussions are not divided.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
content = {'user_id': self.cohorted_user.id, 'type': 'thread'}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
content = {'user_id': self.verified_user.id, 'type': 'thread'}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
@mock.patch('django_comment_client.permissions._check_condition', side_effect=_check_condition)
def test_divided_within_group(self, check_condition_function):
"""
Group moderator should have moderator permissions within their group if the discussions are divided.
"""
set_discussion_division_settings(self.course.id, enable_cohorts=True,
division_scheme=CourseDiscussionSettings.COHORT)
content = {'user_id': self.cohorted_user.id, 'type': 'thread', 'username': self.cohorted_user.username}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': True,
'can_report': True
})
set_discussion_division_settings(self.course.id, division_scheme=CourseDiscussionSettings.ENROLLMENT_TRACK)
content = {'user_id': self.verified_user.id, 'type': 'thread', 'username': self.verified_user.username}
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': True,
'can_reply': True,
'can_delete': True,
'can_openclose': True,
'can_vote': True,
'can_report': True
})
@mock.patch('django_comment_client.permissions._check_condition', side_effect=_check_condition)
def test_divided_outside_group(self, check_condition_function):
"""
Group moderator should not have moderator permissions outside of their group.
"""
content = {'user_id': self.plain_user.id, 'type': 'thread', 'username': self.plain_user.username}
set_discussion_division_settings(self.course.id, division_scheme=CourseDiscussionSettings.NONE)
self.assertEqual(utils.get_ability(self.course.id, content, self.group_moderator), {
'editable': False,
'can_reply': True,
'can_delete': False,
'can_openclose': False,
'can_vote': True,
'can_report': True
})
class ClientConfigurationTestCase(TestCase):
"""Simple test cases to ensure enabling/disabling the use of the comment service works as intended."""
def test_disabled(self):
"""Ensures that an exception is raised when forums are disabled."""
config = ForumsConfig.current()
config.enabled = False
config.save()
with self.assertRaises(CommentClientMaintenanceError):
perform_request('GET', 'http://www.google.com')
@patch('requests.request')
def test_enabled(self, mock_request):
"""Ensures that requests proceed normally when forums are enabled."""
config = ForumsConfig.current()
config.enabled = True
config.save()
response = Mock()
response.status_code = 200
response.json = lambda: {}
mock_request.return_value = response
result = perform_request('GET', 'http://www.google.com')
self.assertEqual(result, {})
def set_discussion_division_settings(
course_key, enable_cohorts=False, always_divide_inline_discussions=False,
divided_discussions=[], division_scheme=CourseDiscussionSettings.COHORT
):
"""
Convenience method for setting cohort enablement and discussion settings.
COHORT is the default division_scheme, as no other schemes were supported at
the time that the unit tests were originally written.
"""
set_course_discussion_settings(
course_key=course_key,
divided_discussions=divided_discussions,
division_scheme=division_scheme,
always_divide_inline_discussions=always_divide_inline_discussions,
)
set_course_cohorted(course_key, enable_cohorts)
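# Editor's sketch (not part of the original suite): a minimal example of how
# the helper above is typically combined with the assertions in these tests.
# The course fixture and topic name are assumptions for illustration.
#
# set_discussion_division_settings(
#     course.id,
#     enable_cohorts=True,
#     divided_discussions=["Feedback"],
#     division_scheme=CourseDiscussionSettings.COHORT,
# )
# assert utils.is_commentable_divided(course.id, topic_name_to_id(course, "Feedback"))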
| agpl-3.0 |
pjs7678/eve | eve/tests/utils.py | 16 | 14091 | # -*- coding: utf-8 -*-
import copy
import hashlib
from bson.json_util import dumps
from datetime import datetime, timedelta
from eve.tests import TestBase
from eve.utils import parse_request, str_to_date, config, weak_date, \
date_to_str, querydef, document_etag, extract_key_values, \
debug_error_message, validate_filters
class TestUtils(TestBase):
""" collection, document and home_link methods (and resource_uri, which is
used by all of them) are tested in 'tests.methods' since we need an active
flaskapp context
"""
def setUp(self):
super(TestUtils, self).setUp()
self.dt_fmt = config.DATE_FORMAT
self.datestr = 'Tue, 18 Sep 2012 10:12:30 GMT'
self.valid = datetime.strptime(self.datestr, self.dt_fmt)
self.etag = '56eaadbbd9fa287e7270cf13a41083c94f52ab9b'
def test_parse_request_where(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['ref']
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).where, None)
with self.app.test_request_context('/?where=hello'):
self.assertEqual(parse_request(self.known_resource).where, 'hello')
def test_parse_request_sort(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).sort, None)
with self.app.test_request_context('/?sort=hello'):
self.assertEqual(parse_request(self.known_resource).sort, 'hello')
def test_parse_request_page(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=2'):
self.assertEqual(parse_request(self.known_resource).page, 2)
with self.app.test_request_context('/?page=-1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=0'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=1.1'):
self.assertEqual(parse_request(self.known_resource).page, 1)
with self.app.test_request_context('/?page=string'):
self.assertEqual(parse_request(self.known_resource).page, 1)
def test_parse_request_max_results(self):
default = config.PAGINATION_DEFAULT
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_max_results_disabled_pagination(self):
self.app.config['DOMAIN'][self.known_resource]['pagination'] = False
default = 0
limit = config.PAGINATION_LIMIT
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=%d' % (limit + 1)):
self.assertEqual(parse_request(self.known_resource).max_results,
limit + 1)
with self.app.test_request_context('/?max_results=2'):
self.assertEqual(parse_request(self.known_resource).max_results, 2)
with self.app.test_request_context('/?max_results=-1'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=0'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
with self.app.test_request_context('/?max_results=1.1'):
self.assertEqual(parse_request(self.known_resource).max_results, 1)
with self.app.test_request_context('/?max_results=string'):
self.assertEqual(parse_request(self.known_resource).max_results,
default)
def test_parse_request_if_modified_since(self):
ims = 'If-Modified-Since'
with self.app.test_request_context():
self.assertEqual(parse_request(
self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers=None):
self.assertEqual(
parse_request(self.known_resource).if_modified_since, None)
with self.app.test_request_context(headers={ims: self.datestr}):
self.assertEqual(
parse_request(self.known_resource).if_modified_since,
self.valid + timedelta(seconds=1))
with self.app.test_request_context(headers={ims: 'not-a-date'}):
self.assertRaises(ValueError, parse_request, self.known_resource)
with self.app.test_request_context(
headers={ims:
self.datestr.replace('GMT', 'UTC')}):
self.assertRaises(ValueError, parse_request, self.known_resource)
self.assertRaises(ValueError, parse_request, self.known_resource)
def test_parse_request_if_none_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_none_match,
None)
with self.app.test_request_context(headers={'If-None-Match':
self.etag}):
self.assertEqual(parse_request(self.known_resource).if_none_match,
self.etag)
def test_parse_request_if_match(self):
with self.app.test_request_context():
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers=None):
self.assertEqual(parse_request(self.known_resource).if_match, None)
with self.app.test_request_context(headers={'If-Match': self.etag}):
self.assertEqual(parse_request(self.known_resource).if_match,
self.etag)
def test_weak_date(self):
with self.app.test_request_context():
self.app.config['DATE_FORMAT'] = '%Y-%m-%d'
self.assertEqual(weak_date(self.datestr), self.valid +
timedelta(seconds=1))
def test_str_to_date(self):
self.assertEqual(str_to_date(self.datestr), self.valid)
self.assertRaises(ValueError, str_to_date, 'not-a-date')
self.assertRaises(ValueError, str_to_date,
self.datestr.replace('GMT', 'UTC'))
def test_date_to_str(self):
self.assertEqual(date_to_str(self.valid), self.datestr)
def test_querydef(self):
self.assertEqual(querydef(max_results=10), '?max_results=10')
self.assertEqual(querydef(page=10), '?page=10')
self.assertEqual(querydef(where='wherepart'), '?where=wherepart')
self.assertEqual(querydef(sort='sortpart'), '?sort=sortpart')
self.assertEqual(querydef(where='wherepart', sort='sortpart'),
'?where=wherepart&sort=sortpart')
self.assertEqual(querydef(max_results=10, sort='sortpart'),
'?max_results=10&sort=sortpart')
def test_document_etag(self):
test = {'key1': 'value1', 'another': 'value2'}
challenge = dumps(test, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test))
def test_document_etag_ignore_fields(self):
test = {'key1': 'value1', 'key2': 'value2'}
ignore_fields = ["key2"]
test_without_ignore = {'key1': 'value1'}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
# ignored fields need not be present in the document
test = {'key1': 'value1', 'key2': 'value2'}
ignore_fields = ["key3"]
test_without_ignore = {'key1': 'value1', 'key2': 'value2'}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
# ignore nested fields using dotted notation
test = {'key1': 'value1', 'dict': {'key2': 'value2', 'key3': 'value3'}}
ignore_fields = ['dict.key2']
test_without_ignore = {'key1': 'value1', 'dict': {'key3': 'value3'}}
challenge = dumps(test_without_ignore, sort_keys=True).encode('utf-8')
with self.app.test_request_context():
self.assertEqual(hashlib.sha1(challenge).hexdigest(),
document_etag(test, ignore_fields))
def test_extract_key_values(self):
test = {
'key1': 'value1',
'key2': {
'key1': 'value2',
'nested': {
'key1': 'value3'
}
}
}
self.assertEqual(list(extract_key_values('key1', test)),
['value1', 'value2', 'value3'])
def test_debug_error_message(self):
with self.app.test_request_context():
self.app.config['DEBUG'] = False
self.assertEqual(debug_error_message('An error message'), None)
self.app.config['DEBUG'] = True
self.assertEqual(debug_error_message('An error message'),
'An error message')
def test_validate_filters(self):
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = []
with self.app.test_request_context():
self.assertTrue('key' in validate_filters(
{'key': 'val'},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'key': ['val1', 'val2']},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'key': {'$in': ['val1', 'val2']}},
self.known_resource))
self.assertTrue('key' in validate_filters(
{'$or': [{'key': 'val1'}, {'key': 'val2'}]},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': 'val'},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': {'key': 'val1'}},
self.known_resource))
self.assertTrue('$or' in validate_filters(
{'$or': ['val']},
self.known_resource))
self.app.config['DOMAIN'][self.known_resource]['allowed_filters'] = \
['key']
with self.app.test_request_context():
self.assertTrue(validate_filters(
{'key': 'val'},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'key': ['val1', 'val2']},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'key': {'$in': ['val1', 'val2']}},
self.known_resource) is None)
self.assertTrue(validate_filters(
{'$or': [{'key': 'val1'}, {'key': 'val2'}]},
self.known_resource) is None)
class DummyEvent(object):
"""
Event handler that records the call parameters and asserts a check.
Usage::
app = Eve()
app.on_my_event = DummyEvent(element_not_deleted)
In the test::
assert app.on_my_event.called[0] == expected_param_0
"""
def __init__(self, check, deepcopy=False):
"""
:param check: method checking the state of something during the event.
:type: check: callable returning bool
:param deepcopy: Do we need to store a copy of the argument calls? In
some events arguments are changed after the event, so keeping a
reference to the original object doesn't allow a test to check what
was passed. The default is False.
:type deepcopy: bool
"""
self.__called = None
self.__check = check
self.__deepcopy = deepcopy
def __call__(self, *args):
assert self.__check()
# In some methods the arguments are changed after the event
if self.__deepcopy:
args = copy.deepcopy(args)
self.__called = args
@property
def called(self):
"""
The results of the call to the event.
:rtype: None if the event hasn't been called, otherwise a tuple with
the positional arguments of the last call.
"""
return self.__called
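# Editor's sketch (not part of the original module): wiring DummyEvent into a
# test. The hook name, check function, and DELETE scenario are illustrative.
#
# def nothing_deleted():
#     return True  # e.g. query the datastore and verify the item still exists
#
# app.on_delete_item = DummyEvent(nothing_deleted, deepcopy=True)
# # ... issue a DELETE through the test client ...
# assert app.on_delete_item.called is not None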
| bsd-3-clause |
sdlBasic/sdlbrt | win32/mingw/opt/lib/python2.7/encodings/utf_16_le.py | 860 | 1037 | """ Python 'utf-16-le' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_le_encode
def decode(input, errors='strict'):
return codecs.utf_16_le_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_le_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_le_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_le_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_le_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-le',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
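# Editor's sketch (not part of the original module): the incremental decoder
# above buffers partial code units, so input may be split at any byte
# boundary. Assumes only the standard library.
#
# import codecs
# dec = codecs.getincrementaldecoder('utf-16-le')()
# data = u'abc'.encode('utf-16-le')  # 6 bytes, 2 per character
# assert dec.decode(data[:3]) + dec.decode(data[3:], final=True) == u'abc'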
| lgpl-2.1 |
alexeyum/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id, or the string name from its metadata, of the
MLComp dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id, the
loader chooses between integer-id lookup and metadata-name lookup by
scanning the unzipped archives and their metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
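# Editor's sketch (not part of the original module): loading a dataset by its
# MLComp name. The dataset name and environment variable value are assumptions.
#
# os.environ['MLCOMP_DATASETS_HOME'] = '/data/mlcomp'
# news_train = load_mlcomp('20news-18828', set_='train')
# print(news_train.target_names[:3])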
| bsd-3-clause |
keen99/SickRage | lib/ndg/httpsclient/test/__init__.py | 93 | 1077 | """Unit tests package for ndg_httpsclient
PyOpenSSL utility to make a httplib-like interface suitable for use with
urllib2
"""
__author__ = "P J Kershaw (STFC)"
__date__ = "05/01/12"
__copyright__ = "(C) 2012 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "Philip.Kershaw@stfc.ac.uk"
__revision__ = '$Id$'
import os
import unittest
class Constants(object):
'''Convenience base class from which other unit tests can extend. It
sets the generic data directory path.'''
PORT = 4443
PORT2 = 4444
HOSTNAME = 'localhost'
TEST_URI = 'https://%s:%d' % (HOSTNAME, PORT)
TEST_URI2 = 'https://%s:%d' % (HOSTNAME, PORT2)
UNITTEST_DIR = os.path.dirname(os.path.abspath(__file__))
CACERT_DIR = os.path.join(UNITTEST_DIR, 'pki', 'ca')
SSL_CERT_FILENAME = 'localhost.crt'
SSL_CERT_FILEPATH = os.path.join(UNITTEST_DIR, 'pki', SSL_CERT_FILENAME)
SSL_PRIKEY_FILENAME = 'localhost.key'
SSL_PRIKEY_FILEPATH = os.path.join(UNITTEST_DIR, 'pki', SSL_PRIKEY_FILENAME)
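# Editor's sketch (not part of the original package): a test case mixing in
# Constants to reuse the shared endpoint and PKI paths. Illustrative only.
#
# class ExampleTestCase(unittest.TestCase, Constants):
#     def test_uri_scheme(self):
#         self.assertTrue(self.TEST_URI.startswith('https://%s' % self.HOSTNAME))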
| gpl-3.0 |