| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (class) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (class) |
|---|---|---|---|---|---|
Allow2CEO/browser-ios | brave/node_modules/ad-block/vendor/depot_tools/third_party/logilab/common/cli.py | 63 | 7042 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Command line interface helper classes.
It provides some default commands, a help system, a default readline
configuration with completion and persistent history.
Example::
class BookShell(CLIHelper):
def __init__(self):
# quit and help are builtins
# CMD_MAP keys are commands, values are topics
self.CMD_MAP['pionce'] = _("Sommeil")
self.CMD_MAP['ronfle'] = _("Sommeil")
CLIHelper.__init__(self)
help_do_pionce = ("pionce", "pionce duree", _("met ton corps en veille"))
def do_pionce(self):
print('nap is good')
help_do_ronfle = ("ronfle", "ronfle volume", _("met les autres en veille"))
def do_ronfle(self):
print('fuuuuuuuuuuuu rhhhhhrhrhrrh')
cl = BookShell()
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
from six.moves import builtins, input
if not hasattr(builtins, '_'):
builtins._ = str
def init_readline(complete_method, histfile=None):
    """Init the readline library if available.

    :param complete_method: completer callable handed to
        ``readline.set_completer`` (called as ``complete_method(text, state)``).
    :param histfile: optional path to a persistent history file; it is read
        now and rewritten automatically at interpreter exit.
    """
    # BUG FIX: the original wrapped the whole body in a bare ``except:``,
    # which swallowed *every* error (even KeyboardInterrupt) and reported it
    # as "readline is not available". Only the import is optional.
    try:
        import readline
    except ImportError:
        print('readline is not available :-(')
        return
    readline.parse_and_bind("tab: complete")
    readline.set_completer(complete_method)
    # Drop ':' from the delimiters so words containing ':' complete as a unit.
    string = readline.get_completer_delims().replace(':', '')
    readline.set_completer_delims(string)
    if histfile is not None:
        try:
            readline.read_history_file(histfile)
        except IOError:
            # No history yet: the file will be created at exit.
            pass
        import atexit
        atexit.register(readline.write_history_file, histfile)
class Completer:
    """Readline completer: proposes the known commands matching a prefix."""

    def __init__(self, commands):
        # Candidate command names offered for completion.
        self.list = commands

    def complete(self, text, state):
        """Hook called by readline when <tab> is pressed."""
        candidates = [cmd for cmd in self.list if cmd.startswith(text)]
        if state < len(candidates):
            return candidates[state]
        return None
class CLIHelper:
    """An abstract command line interface client which recognize commands
    and provide an help system.

    Concrete subclasses define ``do_<cmd>`` methods (the commands) and
    matching ``help_do_<cmd>`` attributes/methods returning
    ``(name, syntax, explanation)`` tuples, and register each command's
    help topic in :attr:`CMD_MAP`.
    """
    # command name -> help topic; concrete classes extend this mapping
    CMD_MAP = {'help': _("Others"),
               'quit': _("Others"),
               }
    # optional prefix required in front of every command on input
    CMD_PREFIX = ''

    def __init__(self, histfile=None):
        self._topics = {}
        self.commands = None
        self._completer = Completer(self._register_commands())
        init_readline(self._completer.complete, histfile)

    def run(self):
        """loop on user input, exit on EOF"""
        while True:
            try:
                line = input('>>> ')
            except EOFError:
                # BUG FIX: was a bare `print` expression which, under
                # `from __future__ import print_function`, is a no-op;
                # the intent was to emit a newline before leaving.
                print()
                break
            s_line = line.strip()
            if not s_line:
                continue
            args = s_line.split()
            if args[0] in self.commands:
                try:
                    cmd = 'do_%s' % self.commands[args[0]]
                    getattr(self, cmd)(*args[1:])
                except EOFError:
                    break
                except Exception:
                    # keep the shell alive on any command failure,
                    # but never silence the traceback
                    import traceback
                    traceback.print_exc()
            else:
                try:
                    self.handle_line(s_line)
                except Exception:
                    import traceback
                    traceback.print_exc()

    def handle_line(self, stripped_line):
        """Method to overload in the concrete class (should handle
        lines which are not commands).
        """
        raise NotImplementedError()

    # private methods #########################################################

    def _register_commands(self):
        """register available commands method and return the list of
        commands name
        """
        self.commands = {}
        self._command_help = {}
        commands = [attr[3:] for attr in dir(self) if attr[:3] == 'do_']
        for command in commands:
            topic = self.CMD_MAP[command]
            help_method = getattr(self, 'help_do_%s' % command)
            self._topics.setdefault(topic, []).append(help_method)
            self.commands[self.CMD_PREFIX + command] = command
            self._command_help[command] = help_method
        return self.commands.keys()

    def _print_help(self, cmd, syntax, explanation):
        print(_('Command %s') % cmd)
        print(_('Syntax: %s') % syntax)
        print('\t', explanation)
        print()

    # predefined commands #####################################################

    def do_help(self, command=None):
        """base input of the help system"""
        if command in self._command_help:
            self._print_help(*self._command_help[command])
        elif command is None or command not in self._topics:
            print(_("Use help <topic> or help <command>."))
            print(_("Available topics are:"))
            topics = sorted(self._topics.keys())
            for topic in topics:
                print('\t', topic)
            print()
            print(_("Available commands are:"))
            # BUG FIX: `self.commands.keys()` is a view object on Python 3
            # and has no .sort() method; use sorted() instead.
            commands = sorted(self.commands)
            for command in commands:
                print('\t', command[len(self.CMD_PREFIX):])
        else:
            print(_('Available commands about %s:') % command)
            # BUG FIX: bare `print` expression was a no-op; emit the
            # intended blank line.
            print()
            for command_help_method in self._topics[command]:
                try:
                    # help entries may be plain tuples or callables
                    # returning a tuple
                    if callable(command_help_method):
                        self._print_help(*command_help_method())
                    else:
                        self._print_help(*command_help_method)
                except Exception:
                    import traceback
                    traceback.print_exc()
                    print('ERROR in help method %s' % (
                        command_help_method.__name__))

    help_do_help = ("help", "help [topic|command]",
                    _("print help message for the given topic/command or \
available topics when no argument"))

    def do_quit(self):
        """quit the CLI"""
        raise EOFError()

    def help_do_quit(self):
        return ("quit", "quit", _("quit the application"))
| mpl-2.0 |
popazerty/openblackhole-SH4 | lib/python/Plugins/SystemPlugins/SoftwareManager/Flash_online.py | 2 | 21333 | from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Components.Button import Button
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.FileList import FileList
from Components.Task import Task, Job, job_manager, Condition
from Components.Sources.StaticText import StaticText
from Screens.Console import Console
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Console import Console
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Tools.Downloader import downloadWithProgress
import urllib2
import os
import shutil
import math
from boxbranding import getBoxType, getImageDistro, getMachineName, getMachineBrand, getBrandOEM
distro = getImageDistro()
#############################################################################################################
# `image` selects which image feed the plugin talks to:
# 0 = openATV / 1 = openMips / 2 = opendroid
image = 0
if distro.lower() == "openmips":
    image = 1
elif distro.lower() == "openatv":
    image = 0
elif distro.lower() == "opendroid":
    image = 2
feedurl_atv = 'http://images.mynonpublic.com/openatv/nightly'
feedurl_om = 'http://image.openmips.com/2.0'
# BUG FIX: the bare 'http://www.droidsat.org/image' value was assigned and
# immediately overwritten on the next line; only the /opendroid URL was ever
# used, so the dead assignment is removed.
feedurl_opendroid = 'http://www.droidsat.org/image/opendroid'
imagePath = '/media/hdd/images'           # where downloaded images are stored
flashPath = '/media/hdd/images/flash'     # unzip target for an image archive
flashTmp = '/media/hdd/images/tmp'        # staging dir handed to ofgwrite
ofgwritePath = '/usr/bin/ofgwrite'        # external flashing tool
#############################################################################################################
def Freespace(dev):
    """Return the free space on mount point `dev`, in kilobytes."""
    statdev = os.statvfs(dev)
    # f_bavail = blocks available to unprivileged users, f_frsize = fragment size
    space = (statdev.f_bavail * statdev.f_frsize) / 1024
    print "[Flash Online] Free space on %s = %i kilobytes" %(dev, space)
    return space
class FlashOnline(Screen):
    """Entry screen of the plugin: lets the user choose between flashing a
    locally stored image (yellow) or downloading one first (green).
    """
    skin = """
	<screen position="center,center" size="560,400" title="Flash On the Fly">
		<ePixmap position="0,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
		<ePixmap position="140,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
		<ePixmap position="280,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
		<ePixmap position="420,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
		<widget name="key_red" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_green" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_yellow" position="280,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_blue" position="420,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="info-online" position="10,30" zPosition="1" size="450,100" font="Regular;20" halign="left" valign="top" transparent="1" />
		<widget name="info-local" position="10,150" zPosition="1" size="450,200" font="Regular;20" halign="left" valign="top" transparent="1" />
	</screen>"""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        Screen.setTitle(self, _("Flash On the Fly"))
        self["key_yellow"] = Button("Local")
        self["key_green"] = Button("Online")
        self["key_red"] = Button(_("Exit"))
        self["key_blue"] = Button("")
        self["info-local"] = Label(_("Local = Flash a image from local path /hdd/images"))
        self["info-online"] = Label(_("Online = Download a image and flash it"))
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
        {
            "blue": self.blue,
            "yellow": self.yellow,
            "green": self.green,
            "red": self.quit,
            "cancel": self.quit,
        }, -2)

    def check_hdd(self):
        """Verify the preconditions for flashing (mounted /hdd with enough
        space, ofgwrite installed) and (re)create the working directories.
        Returns True when flashing may proceed, False after showing an
        error message box otherwise.
        """
        if not os.path.exists("/media/hdd"):
            self.session.open(MessageBox, _("No /hdd found !!\nPlease make sure you have a HDD mounted.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
            return False
        # 300000 kB ~= 300 MB minimum free space for image download + unzip
        if Freespace('/media/hdd') < 300000:
            self.session.open(MessageBox, _("Not enough free space on /hdd !!\nYou need at least 300Mb free space.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
            return False
        if not os.path.exists(ofgwritePath):
            self.session.open(MessageBox, _('ofgwrite not found !!\nPlease make sure you have ofgwrite installed in /usr/bin/ofgwrite.\n\nExit plugin.'), type = MessageBox.TYPE_ERROR)
            return False
        if not os.path.exists(imagePath):
            try:
                os.mkdir(imagePath)
            except:
                pass
        # always start from an empty flash directory
        if os.path.exists(flashPath):
            try:
                os.system('rm -rf ' + flashPath)
            except:
                pass
        try:
            os.mkdir(flashPath)
        except:
            pass
        return True

    def quit(self):
        self.close()

    def blue(self):
        # blue button is unused on this screen
        pass

    def green(self):
        # download an image from the feed, then flash it
        if self.check_hdd():
            self.session.open(doFlashImage, online = True)
        else:
            self.close()

    def yellow(self):
        # flash an image already present under /hdd/images
        if self.check_hdd():
            self.session.open(doFlashImage, online = False)
        else:
            self.close()
class doFlashImage(Screen):
    """Image picker + flash driver.

    In online mode the list is scraped from the distro's HTTP feed and the
    selected image is downloaded first; in local mode the list comes from
    `imagePath` on disk. Either way the image is unzipped to `flashPath`,
    staged into `flashTmp` and handed to ofgwrite.
    """
    skin = """
	<screen position="center,center" size="700,500" title="Flash On the fly (select a image)">
		<ePixmap position="0,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
		<ePixmap position="140,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
		<ePixmap position="280,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
		<ePixmap position="420,460" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
		<widget name="key_red" position="0,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_green" position="140,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_yellow" position="280,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="key_blue" position="420,460" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		<widget name="imageList" position="10,10" zPosition="1" size="680,450" font="Regular;20" scrollbarMode="showOnDemand" transparent="1" />
	</screen>"""

    def __init__(self, session, online ):
        Screen.__init__(self, session)
        self.session = session
        Screen.setTitle(self, _("Flash On the fly (select a image)"))
        self["key_green"] = Button(_("Flash"))
        self["key_red"] = Button(_("Exit"))
        self["key_blue"] = Button("")
        self["key_yellow"] = Button("")
        self.filename = None
        self.imagelist = []
        # simulate=True would run ofgwrite with -n (no write); never toggled here
        self.simulate = False
        self.Online = online
        self.imagePath = imagePath
        self.feedurl = feedurl_opendroid
        if image == 0:
            self.feed = "atv"
        else:
            self.feed = "opendroid"
        self["imageList"] = MenuList(self.imagelist)
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
        {
            "green": self.green,
            "ok": self.green,
            "yellow": self.yellow,
            "red": self.quit,
            "blue": self.blue,
            "cancel": self.quit,
        }, -2)
        self.onLayoutFinish.append(self.layoutFinished)

    def quit(self):
        self.close()

    def blue(self):
        # Online: blue toggles the feed; NOTE(review): both branches assign
        # the same value "opendroid", so the toggle is effectively a no-op.
        if self.Online:
            if image == 2:
                if self.feed == "opendroid":
                    self.feed = "opendroid"
                else:
                    self.feed = "opendroid"
                self.layoutFinished()
            return
        # Local: blue deletes the selected image file after confirmation.
        sel = self["imageList"].l.getCurrentSelection()
        if sel == None:
            print "Nothing to select !!"
            return
        self.filename = sel
        self.session.openWithCallback(self.RemoveCB, MessageBox, _("Do you really want to delete\n%s ?") % (sel), MessageBox.TYPE_YESNO)

    def RemoveCB(self, ret):
        # confirmation callback for blue(): delete file and refresh the list
        if ret:
            if os.path.exists(self.imagePath + "/" + self.filename):
                os.remove(self.imagePath + "/" + self.filename)
            self.imagelist.remove(self.filename)
            self["imageList"].l.setList(self.imagelist)

    def box(self):
        """Map the boxtype/machine name reported by boxbranding onto the
        directory name used by the image feeds."""
        box = getBoxType()
        machinename = getMachineName()
        if box in ('uniboxhd1', 'uniboxhd2', 'uniboxhd3'):
            box = "ventonhdx"
        elif box == 'odinm6':
            box = getMachineName().lower()
        elif box == "inihde" and machinename.lower() == "xpeedlx":
            box = "xpeedlx"
        elif box in ('xpeedlx1', 'xpeedlx2'):
            box = "xpeedlx"
        elif box == "inihde" and machinename.lower() == "hd-1000":
            box = "sezam-1000hd"
        elif box == "ventonhdx" and machinename.lower() == "hd-5000":
            box = "sezam-5000hd"
        elif box == "ventonhdx" and machinename.lower() == "premium twin":
            box = "miraclebox-twin"
        elif box == "xp1000" and machinename.lower() == "sf8 hd":
            box = "sf8"
        elif box.startswith('et') and not box in ('et8000', 'et8500', 'et8500s', 'et10000'):
            # generic etXx00 family directory, e.g. et4000 -> et4x00
            box = box[0:3] + 'x00'
        elif box == 'odinm9' and self.feed == "opendroid":
            box = 'maram9'
        return box

    def green(self):
        """Flash (or first download) the currently selected image."""
        sel = self["imageList"].l.getCurrentSelection()
        if sel == None:
            print "Nothing to select !!"
            return
        file_name = self.imagePath + "/" + sel
        self.filename = file_name
        box = self.box()
        self.hide()
        if self.Online:
            if self.feed == "opendroid":
                url = self.feedurl + "/" + sel
            else:
                url = self.feedurl + "/" + box + "/" + sel
            #print url
            # NOTE(review): this urlopen/open pair only probes the URL and
            # reads Content-Length for the log line; neither handle is
            # closed here, and the actual transfer is done by the job below.
            u = urllib2.urlopen(url)
            f = open(file_name, 'wb')
            meta = u.info()
            file_size = int(meta.getheaders("Content-Length")[0])
            print "Downloading: %s Bytes: %s" % (sel, file_size)
            job = ImageDownloadJob(url, file_name, sel)
            job.afterEvent = "close"
            job_manager.AddJob(job)
            job_manager.failed_jobs = []
            self.session.openWithCallback(self.ImageDownloadCB, JobView, job, backgroundable = False, afterEventChangeable = False)
        else:
            # the special list entry str(flashTmp) means "already staged"
            if sel == str(flashTmp):
                self.Start_Flashing()
            else:
                self.unzip_image(self.filename, flashPath)

    def ImageDownloadCB(self, ret):
        # JobView callback after the download job finished or was aborted
        if ret:
            return
        if job_manager.active_job:
            job_manager.active_job = None
            self.close()
            return
        if len(job_manager.failed_jobs) == 0:
            self.session.openWithCallback(self.askUnzipCB, MessageBox, _("The image is downloaded. Do you want to flash now?"), MessageBox.TYPE_YESNO)
        else:
            self.session.open(MessageBox, _("Download Failed !!"), type = MessageBox.TYPE_ERROR)

    def askUnzipCB(self, ret):
        if ret:
            self.unzip_image(self.filename, flashPath)
        else:
            self.show()

    def unzip_image(self, filename, path):
        """Unpack the zip archive into `path`, then stage and flash."""
        print "Unzip %s to %s" %(filename,path)
        self.session.openWithCallback(self.cmdFinished, Console, title = _("Unzipping files, Please wait ..."), cmdlist = ['unzip ' + filename + ' -o -d ' + path, "sleep 3"], closeOnSuccess = True)

    def cmdFinished(self):
        self.prepair_flashtmp(flashPath)
        self.Start_Flashing()

    def Start_Flashing(self):
        """Run ofgwrite on the staged kernel/rootfs in flashTmp."""
        print "Start Flashing"
        if os.path.exists(ofgwritePath):
            text = _("Flashing: ")
            if self.simulate:
                # -n: dry run, only report found image/mtd partitions
                text += _("Simulate (no write)")
                cmd = "%s -n -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp)
                self.close()
                message = "echo -e '\n"
                message += _('Show only found image and mtd partitions.\n')
                message += "'"
            else:
                # -r -k: write both rootfs and kernel
                text += _("root and kernel")
                cmd = "%s -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp)
                message = "echo -e '\n"
                message += _('ofgwrite will stop enigma2 now to run the flash.\n')
                message += _('Your %s %s will freeze during the flashing process.\n') % (getMachineBrand(), getMachineName())
                message += _('Please: DO NOT reboot your %s %s and turn off the power.\n') % (getMachineBrand(), getMachineName())
                message += _('The image or kernel will be flashing and auto booted in few minutes.\n')
                if self.box() == 'gb800solo':
                    message += _('GB800SOLO takes about 20 mins !!\n')
                message += "'"
            self.session.open(Console, text,[message, cmd])

    def prepair_flashtmp(self, tmpPath):
        """Copy the first kernel/rootfs files found under `tmpPath` into a
        fresh flashTmp directory, under the names ofgwrite expects."""
        if os.path.exists(flashTmp):
            # rename-then-delete so a fresh dir can be created immediately
            flashTmpold = flashTmp + 'old'
            os.system('mv %s %s' %(flashTmp, flashTmpold))
            os.system('rm -rf %s' %flashTmpold)
        if not os.path.exists(flashTmp):
            os.mkdir(flashTmp)
        # flags: only the first matching kernel/rootfs file is taken
        kernel = True
        rootfs = True
        for path, subdirs, files in os.walk(tmpPath):
            for name in files:
                if name.find('kernel') > -1 and name.endswith('.bin') and kernel:
                    binfile = os.path.join(path, name)
                    dest = flashTmp + '/kernel.bin'
                    shutil.copyfile(binfile, dest)
                    kernel = False
                elif name.find('root') > -1 and (name.endswith('.bin') or name.endswith('.jffs2')) and rootfs:
                    binfile = os.path.join(path, name)
                    dest = flashTmp + '/rootfs.bin'
                    shutil.copyfile(binfile, dest)
                    rootfs = False
                elif name.find('uImage') > -1 and kernel:
                    binfile = os.path.join(path, name)
                    dest = flashTmp + '/uImage'
                    shutil.copyfile(binfile, dest)
                    kernel = False
                elif name.find('e2jffs2') > -1 and name.endswith('.img') and rootfs:
                    binfile = os.path.join(path, name)
                    dest = flashTmp + '/e2jffs2.img'
                    shutil.copyfile(binfile, dest)
                    rootfs = False

    def yellow(self):
        # Local mode only: browse mounted devices for an image file
        if not self.Online:
            self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, matchingPattern="^.*\.(zip|bin|jffs2)", showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])

    def DeviceBrowserClosed(self, path, filename, binorzip):
        """Callback from DeviceBrowser: binorzip is 0 for raw bin/jffs2
        files, 1 for a zip archive, -1 when the browser was cancelled."""
        if path:
            print path, filename, binorzip
            strPath = str(path)
            if strPath[-1] == '/':
                strPath = strPath[:-1]
            self.imagePath = strPath
            if os.path.exists(flashTmp):
                os.system('rm -rf ' + flashTmp)
            os.mkdir(flashTmp)
            if binorzip == 0:
                for files in os.listdir(self.imagePath):
                    if files.endswith(".bin") or files.endswith('.jffs2') or files.endswith('.img'):
                        self.prepair_flashtmp(strPath)
                        break
                self.Start_Flashing()
            elif binorzip == 1:
                self.unzip_image(strPath + '/' + filename, flashPath)
            else:
                self.layoutFinished()
        else:
            self.imagePath = imagePath

    def layoutFinished(self):
        """(Re)build the image list: scrape the HTTP feed in online mode,
        list *.zip files (plus an already staged flashTmp) in local mode."""
        box = self.box()
        self.imagelist = []
        if self.Online:
            self["key_yellow"].setText("")
            if image == 2:
                if self.feed == "opendroid":
                    self.feedurl = feedurl_opendroid
                    self["key_blue"].setText("opendroidimages")
                else:
                    self.feedurl = feedurl_opendroid
                    self["key_blue"].setText("opendroid 5.0")
            else:
                self.feedurl = feedurl_atv
                self["key_blue"].setText("")
            if self.feedurl == feedurl_opendroid:
                url = '%s' % (self.feedurl)
            else:
                url = '%s/%s' % (self.feedurl,box)
            req = urllib2.Request(url)
            try:
                response = urllib2.urlopen(req)
            except urllib2.URLError as e:
                print "URL ERROR: %s" % e
                return
            # NOTE(review): HTTPError is a subclass of URLError, so this
            # second handler around read() is unlikely to ever trigger.
            try:
                the_page = response.read()
            except urllib2.HTTPError as e:
                print "HTTP download ERROR: %s" % e.code
                return
            # crude HTML scraping of the directory listing; the slice
            # offsets depend on the feed's fixed file-name length
            lines = the_page.split('\n')
            tt = len(box)
            for line in lines:
                if line.find("<a href='%s/" % box) > -1:
                    t = line.find("<a href='%s/" % box)
                    if self.feed == "atv":
                        self.imagelist.append(line[t+tt+10:t+tt+tt+39])
                    else:
                        self.imagelist.append(line[t+tt+10:t+tt+tt+40])
                if self.feedurl == feedurl_opendroid:
                    if line.find('%s' % box) > -1:
                        t = line.find('<a href="')
                        e = line.find('zip"')
                        self.imagelist.append(line[t+9:e+3])
                else:
                    if line.find('<a href="o') > -1:
                        t = line.find('<a href="o')
                        e = line.find('zip"')
                        self.imagelist.append(line[t+9:e+3])
        else:
            self["key_blue"].setText(_("Delete"))
            self["key_yellow"].setText(_("Devices"))
            for name in os.listdir(self.imagePath):
                if name.endswith(".zip"): # and name.find(box) > 1:
                    self.imagelist.append(name)
            self.imagelist.sort()
            # offer the already staged flashTmp dir as first list entry
            if os.path.exists(flashTmp):
                for file in os.listdir(flashTmp):
                    if file.find(".bin") > -1:
                        self.imagelist.insert( 0, str(flashTmp))
                        break
        self["imageList"].l.setList(self.imagelist)
class ImageDownloadJob(Job):
    """Job wrapping a single ImageDownloadTask for the enigma2 job manager."""
    def __init__(self, url, filename, file):
        # `file` is only used for the job's display name
        Job.__init__(self, _("Downloading %s" %file))
        # the task registers itself with the job on construction
        ImageDownloadTask(self, url, filename)
class DownloaderPostcondition(Condition):
    """Task postcondition: the download succeeded iff its returncode is 0."""

    def check(self, task):
        # ImageDownloadTask.processFinished() records the result code
        return task.returncode == 0

    def getErrorMessage(self, task):
        # BUG FIX: the message is stored on the *task* (set in
        # ImageDownloadTask.download_failed), never on this condition;
        # reading self.error_message raised AttributeError on failure.
        return task.error_message
class ImageDownloadTask(Task):
    """Background task downloading `url` to `path` with progress updates."""
    def __init__(self, job, url, path):
        Task.__init__(self, job, _("Downloading"))
        self.postconditions.append(DownloaderPostcondition())
        self.job = job
        self.url = url
        self.path = path
        self.error_message = ""
        self.last_recvbytes = 0
        # NOTE(review): error_message is assigned twice; the effective
        # initial value is None
        self.error_message = None
        self.download = None
        self.aborted = False
    def run(self, callback):
        # start the twisted-based download; completion/failure callbacks
        # finish the task
        self.callback = callback
        self.download = downloadWithProgress(self.url,self.path)
        self.download.addProgress(self.download_progress)
        self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
        print "[ImageDownloadTask] downloading", self.url, "to", self.path
    def abort(self):
        print "[ImageDownloadTask] aborting", self.url
        if self.download:
            self.download.stop()
        self.aborted = True
    def download_progress(self, recvbytes, totalbytes):
        # only update the UI every 10000 bytes to avoid flicker
        if ( recvbytes - self.last_recvbytes ) > 10000: # anti-flicker
            self.progress = int(100*(float(recvbytes)/float(totalbytes)))
            self.name = _("Downloading") + ' ' + "%d of %d kBytes" % (recvbytes/1024, totalbytes/1024)
            self.last_recvbytes = recvbytes
    def download_failed(self, failure_instance=None, error_message=""):
        self.error_message = error_message
        if error_message == "" and failure_instance is not None:
            self.error_message = failure_instance.getErrorMessage()
        # returncode 1 -> DownloaderPostcondition.check fails
        Task.processFinished(self, 1)
    def download_finished(self, string=""):
        if self.aborted:
            self.finish(aborted = True)
        else:
            Task.processFinished(self, 0)
class DeviceBrowser(Screen, HelpableScreen):
    """File/device browser used to pick an image file (.bin/.jffs2/.zip).

    Closes with (directory, filename, kind) where kind is 0 for raw
    bin/jffs2 files, 1 for a zip archive, -1 on cancel.
    """
    skin = """
	<screen name="DeviceBrowser" position="center,center" size="520,430" >
		<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
		<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
		<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
		<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
		<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
		<widget name="filelist" position="5,210" size="510,220" scrollbarMode="showOnDemand" />
	</screen>"""

    def __init__(self, session, startdir, message="", showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = "", useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
        Screen.__init__(self, session)
        HelpableScreen.__init__(self)
        Screen.setTitle(self, _("Please select medium"))
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText()
        self["message"] = StaticText(message)
        self.filelist = FileList(startdir, showDirectories = showDirectories, showFiles = showFiles, showMountpoints = showMountpoints, matchingPattern = matchingPattern, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
        self["filelist"] = self.filelist
        self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
        {
            "green": self.use,
            "red": self.exit,
            "ok": self.ok,
            "cancel": self.exit
        })
        # refresh the green button whenever a device is (un)plugged
        hotplugNotifier.append(self.hotplugCB)
        self.onShown.append(self.updateButton)
        self.onClose.append(self.removeHotplug)

    def hotplugCB(self, dev, action):
        print "[hotplugCB]", dev, action
        self.updateButton()

    def updateButton(self):
        # only offer "Flash" when something is actually selectable
        if self["filelist"].getFilename() or self["filelist"].getCurrentDirectory():
            self["key_green"].text = _("Flash")
        else:
            self["key_green"].text = ""

    def removeHotplug(self):
        print "[removeHotplug]"
        hotplugNotifier.remove(self.hotplugCB)

    def ok(self):
        if self.filelist.canDescent():
            # in mountpoint-only mode, OK selects instead of descending
            if self["filelist"].showMountpoints == True and self["filelist"].showDirectories == False:
                self.use()
            else:
                self.filelist.descent()

    def use(self):
        print "[use]", self["filelist"].getCurrentDirectory(), self["filelist"].getFilename()
        if self["filelist"].getFilename() is not None and self["filelist"].getCurrentDirectory() is not None:
            if self["filelist"].getFilename().endswith(".bin") or self["filelist"].getFilename().endswith(".jffs2"):
                self.close(self["filelist"].getCurrentDirectory(), self["filelist"].getFilename(), 0)
            elif self["filelist"].getFilename().endswith(".zip"):
                self.close(self["filelist"].getCurrentDirectory(), self["filelist"].getFilename(), 1)
            else:
                return

    def exit(self):
        self.close(False, False, -1)
| gpl-2.0 |
chickenboy10/tgstation | tools/mapmerge/maintloot.py | 118 | 1940 | #!/usr/bin/env python3
import argparse
import collections
import re
from map_helpers import parse_map
if __name__=='__main__':
    """Tally maintenance lootdrop counts per area for a BYOND .dmm map."""
    parser = argparse.ArgumentParser()
    parser.add_argument("mapfile")
    args = parser.parse_args()
    M = parse_map(args.mapfile)
    # Format of this map parsing
    # dict(coordinates->mapkey)
    grid = M["grid"]
    # dict(mapkey->tilecontents)
    dictionary = M["dictionary"]
    # tilecontents are a list of atoms, path is first
    lootdrop_path = "/obj/effect/spawner/lootdrop/maintenance"
    area_path = "/area"
    # map keys (tile models) containing at least one maintenance lootdrop
    follow_up = set()
    for key, atoms in dictionary.items():
        # atom is a string
        for atom in atoms:
            if atom.startswith(lootdrop_path):
                if key in follow_up:
                    # BUG FIX: the '{}' placeholder was never filled in;
                    # report which key has multiple lootdrops
                    print("Hey, '{}' has multiple maintlootdrops...".format(key))
                follow_up.add(key)
    # Count the number of times each map key appears
    appears = collections.Counter()
    for coord, key in grid.items():
        if key in follow_up:
            appears[key] += 1
    tally = collections.Counter()
    for key in follow_up:
        # Because I am a terrible person, and don't actually care about
        # building a proper parser for this "object notation" that byond
        # uses, I'm just going to cheat.
        area = None
        count = 0
        for atom in dictionary[key]:
            if atom.startswith(lootdrop_path):
                # default lootcount is 1 unless overridden on the atom
                amount = 1
                mo = re.search(r'lootcount = (\d+)', atom)
                if mo is not None:
                    amount = int(mo.group(1))
                count += amount
            elif atom.startswith(area_path):
                area = atom
        # Multiply by the number of times this model is used
        tally[area] += (count * appears[key])
    for area, total in tally.items():
        print("{}: {}".format(area, total))
    print("TOTAL: {}".format(sum(tally.values())))
| agpl-3.0 |
aslamplr/shorts | gdata/calendar/data.py | 121 | 9852 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the Google Calendar Data API"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
import gdata.acl.data
import gdata.data
import gdata.geo.data
import gdata.opensearch.data
# XML namespace for the Google Calendar (gCal) extension elements.
GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005'
# Clark-notation template used to build qualified names: '{ns}tag'.
GCAL_TEMPLATE = '{%s}%%s' % GCAL_NAMESPACE
# Link rel value marking a calendar "web content" link.
WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent')
# Simple gCal value-attribute extension elements (<gCal:tag value="..."/>).
class AccessLevelProperty(atom.core.XmlElement):
    """Describes how much a given user may do with an event or calendar"""
    _qname = GCAL_TEMPLATE % 'accesslevel'
    value = 'value'  # maps the XML 'value' attribute


class AllowGSync2Property(atom.core.XmlElement):
    """Whether the user is permitted to run Google Apps Sync"""
    _qname = GCAL_TEMPLATE % 'allowGSync2'
    value = 'value'


class AllowGSyncProperty(atom.core.XmlElement):
    """Whether the user is permitted to run Google Apps Sync"""
    _qname = GCAL_TEMPLATE % 'allowGSync'
    value = 'value'


class AnyoneCanAddSelfProperty(atom.core.XmlElement):
    """Whether anyone can add self as attendee"""
    _qname = GCAL_TEMPLATE % 'anyoneCanAddSelf'
    value = 'value'
class CalendarAclRole(gdata.acl.data.AclRole):
    """Describes the Calendar roles of an entry in the Calendar access control list"""
    _qname = gdata.acl.data.GACL_TEMPLATE % 'role'


class CalendarCommentEntry(gdata.data.GDEntry):
    """Describes an entry in a feed of a Calendar event's comments"""


class CalendarCommentFeed(gdata.data.GDFeed):
    """Describes feed of a Calendar event's comments"""
    entry = [CalendarCommentEntry]  # repeated child entries


class CalendarComments(gdata.data.Comments):
    """Describes a container of a feed link for Calendar comment entries"""
    _qname = gdata.data.GD_TEMPLATE % 'comments'


class CalendarExtendedProperty(gdata.data.ExtendedProperty):
    """Defines a value for the realm attribute that is used only in the calendar API"""
    _qname = gdata.data.GD_TEMPLATE % 'extendedProperty'


class CalendarWhere(gdata.data.Where):
    """Extends the base Where class with Calendar extensions"""
    _qname = gdata.data.GD_TEMPLATE % 'where'
class ColorProperty(atom.core.XmlElement):
    """Describes the color of a calendar"""
    _qname = GCAL_TEMPLATE % 'color'
    value = 'value'  # maps the XML 'value' attribute


class GuestsCanInviteOthersProperty(atom.core.XmlElement):
    """Whether guests can invite others to the event"""
    _qname = GCAL_TEMPLATE % 'guestsCanInviteOthers'
    value = 'value'


class GuestsCanModifyProperty(atom.core.XmlElement):
    """Whether guests can modify event"""
    _qname = GCAL_TEMPLATE % 'guestsCanModify'
    value = 'value'


class GuestsCanSeeGuestsProperty(atom.core.XmlElement):
    """Whether guests can see other attendees"""
    _qname = GCAL_TEMPLATE % 'guestsCanSeeGuests'
    value = 'value'


class HiddenProperty(atom.core.XmlElement):
    """Describes whether a calendar is hidden"""
    _qname = GCAL_TEMPLATE % 'hidden'
    value = 'value'


class IcalUIDProperty(atom.core.XmlElement):
    """Describes the UID in the ical export of the event"""
    _qname = GCAL_TEMPLATE % 'uid'
    value = 'value'


class OverrideNameProperty(atom.core.XmlElement):
    """Describes the override name property of a calendar"""
    _qname = GCAL_TEMPLATE % 'overridename'
    value = 'value'


class PrivateCopyProperty(atom.core.XmlElement):
    """Indicates whether this is a private copy of the event, changes to which should not be sent to other calendars"""
    _qname = GCAL_TEMPLATE % 'privateCopy'
    value = 'value'


class QuickAddProperty(atom.core.XmlElement):
    """Describes whether gd:content is for quick-add processing"""
    _qname = GCAL_TEMPLATE % 'quickadd'
    value = 'value'


class ResourceProperty(atom.core.XmlElement):
    """Describes whether gd:who is a resource such as a conference room"""
    _qname = GCAL_TEMPLATE % 'resource'
    value = 'value'
    id = 'id'  # maps the XML 'id' attribute
class EventWho(gdata.data.Who):
    """Extends the base Who class with Calendar extensions"""
    _qname = gdata.data.GD_TEMPLATE % 'who'
    resource = ResourceProperty  # optional gCal:resource child


class SelectedProperty(atom.core.XmlElement):
    """Describes whether a calendar is selected"""
    _qname = GCAL_TEMPLATE % 'selected'
    value = 'value'  # maps the XML 'value' attribute


class SendAclNotificationsProperty(atom.core.XmlElement):
    """Describes whether to send ACL notifications to grantees"""
    _qname = GCAL_TEMPLATE % 'sendAclNotifications'
    value = 'value'


class CalendarAclEntry(gdata.acl.data.AclEntry):
    """Describes an entry in a feed of a Calendar access control list (ACL)"""
    send_acl_notifications = SendAclNotificationsProperty


class CalendarAclFeed(gdata.data.GDFeed):
    """Describes a Calendar access contorl list (ACL) feed"""
    entry = [CalendarAclEntry]  # repeated ACL entries
class SendEventNotificationsProperty(atom.core.XmlElement):
"""Describes whether to send event notifications to other participants of the event"""
_qname = GCAL_TEMPLATE % 'sendEventNotifications'
value = 'value'
class SequenceNumberProperty(atom.core.XmlElement):
"""Describes sequence number of an event"""
_qname = GCAL_TEMPLATE % 'sequence'
value = 'value'
class CalendarRecurrenceExceptionEntry(gdata.data.GDEntry):
"""Describes an entry used by a Calendar recurrence exception entry link"""
uid = IcalUIDProperty
sequence = SequenceNumberProperty
class CalendarRecurrenceException(gdata.data.RecurrenceException):
"""Describes an exception to a recurring Calendar event"""
_qname = gdata.data.GD_TEMPLATE % 'recurrenceException'
class SettingsProperty(atom.core.XmlElement):
"""User preference name-value pair"""
_qname = GCAL_TEMPLATE % 'settingsProperty'
name = 'name'
value = 'value'
class SettingsEntry(gdata.data.GDEntry):
"""Describes a Calendar Settings property entry"""
settings_property = SettingsProperty
class CalendarSettingsFeed(gdata.data.GDFeed):
"""Personal settings for Calendar application"""
entry = [SettingsEntry]
class SuppressReplyNotificationsProperty(atom.core.XmlElement):
"""Lists notification methods to be suppressed for this reply"""
_qname = GCAL_TEMPLATE % 'suppressReplyNotifications'
methods = 'methods'
class SyncEventProperty(atom.core.XmlElement):
"""Describes whether this is a sync scenario where the Ical UID and Sequence number are honored during inserts and updates"""
_qname = GCAL_TEMPLATE % 'syncEvent'
value = 'value'
class When(gdata.data.When):
"""Extends the gd:when element to add reminders"""
reminder = [gdata.data.Reminder]
class CalendarEventEntry(gdata.data.BatchEntry):
  """Describes a Calendar event entry.

  Aggregates the gCal:* extension elements defined above with the generic
  gd:* event elements from gdata.data.  List-valued attributes represent
  repeatable child elements.
  """
  quick_add = QuickAddProperty
  send_event_notifications = SendEventNotificationsProperty
  sync_event = SyncEventProperty
  anyone_can_add_self = AnyoneCanAddSelfProperty
  extended_property = [CalendarExtendedProperty]
  sequence = SequenceNumberProperty
  guests_can_invite_others = GuestsCanInviteOthersProperty
  guests_can_modify = GuestsCanModifyProperty
  guests_can_see_guests = GuestsCanSeeGuestsProperty
  georss_where = gdata.geo.data.GeoRssWhere
  private_copy = PrivateCopyProperty
  suppress_reply_notifications = SuppressReplyNotificationsProperty
  uid = IcalUIDProperty
  where = [gdata.data.Where]
  # NOTE(review): 'who' binds the generic gdata.data.Who rather than the
  # local EventWho subclass (which adds the gCal:resource child); likewise
  # 'recurrence_exception' uses the base class instead of
  # CalendarRecurrenceException.  Verify this is intentional.
  when = [When]
  who = [gdata.data.Who]
  transparency = gdata.data.Transparency
  comments = gdata.data.Comments
  event_status = gdata.data.EventStatus
  visibility = gdata.data.Visibility
  recurrence = gdata.data.Recurrence
  recurrence_exception = [gdata.data.RecurrenceException]
  original_event = gdata.data.OriginalEvent
  reminder = [gdata.data.Reminder]
class TimeZoneProperty(atom.core.XmlElement):
  """Describes the time zone of a calendar"""
  _qname = GCAL_TEMPLATE % 'timezone'
  value = 'value'


class TimesCleanedProperty(atom.core.XmlElement):
  """Describes how many times calendar was cleaned via Manage Calendars"""
  _qname = GCAL_TEMPLATE % 'timesCleaned'
  value = 'value'


class CalendarEntry(gdata.data.GDEntry):
  """Describes a Calendar entry in the feed of a user's calendars"""
  timezone = TimeZoneProperty
  overridename = OverrideNameProperty
  hidden = HiddenProperty
  selected = SelectedProperty
  times_cleaned = TimesCleanedProperty
  color = ColorProperty
  where = [CalendarWhere]
  accesslevel = AccessLevelProperty


class CalendarEventFeed(gdata.data.BatchFeed):
  """Describes a Calendar event feed"""
  allow_g_sync2 = AllowGSync2Property
  timezone = TimeZoneProperty
  entry = [CalendarEventEntry]
  times_cleaned = TimesCleanedProperty
  allow_g_sync = AllowGSyncProperty


class CalendarFeed(gdata.data.GDFeed):
  """Describes a feed of Calendars"""
  entry = [CalendarEntry]
class WebContentGadgetPref(atom.core.XmlElement):
  """Describes a single web content gadget preference"""
  _qname = GCAL_TEMPLATE % 'webContentGadgetPref'
  name = 'name'
  value = 'value'


class WebContent(atom.core.XmlElement):
  """Describes a "web content" extension"""
  _qname = GCAL_TEMPLATE % 'webContent'
  # height/width/url/display are plain XML attributes; gadget preferences
  # are repeatable child elements.
  height = 'height'
  width = 'width'
  web_content_gadget_pref = [WebContentGadgetPref]
  url = 'url'
  display = 'display'
class WebContentLink(atom.data.Link):
  """Describes a "web content" link.

  An atom:link whose rel is fixed to WEB_CONTENT_LINK_REL (a module-level
  constant defined elsewhere in this file) and which may carry a nested
  gCal:webContent child element.
  """

  def __init__(self, title=None, href=None, link_type=None,
               web_content=None):
    # NOTE(review): the web_content argument is accepted but never stored;
    # only the class-level binding below is used for XML parsing.  Confirm
    # whether the argument should be assigned to self.web_content.
    atom.data.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href,
                            link_type=link_type)

  web_content = WebContent
| mit |
andreparames/odoo | addons/delivery/__init__.py | 376 | 1103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery
import partner
import sale
import stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ifduyue/sentry | src/sentry/web/frontend/vsts_extension_configuration.py | 1 | 2305 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from sentry.integrations.pipeline import IntegrationPipeline
from sentry.models import Organization
from sentry.web.frontend.base import BaseView
class VstsExtensionConfigurationView(BaseView):
    """Entry point for configuring the Sentry extension from VSTS.

    VSTS redirects the user here after the marketplace extension is
    installed.  Flow:

    * If the user is not logged in, bounce through the Sentry login page
      and return here afterwards (the VSTS account parameters are
      round-tripped through the ``next`` query string).
    * If the user belongs to exactly one organization, start the
      integration pipeline for it immediately.
    * Otherwise redirect to the organization-selection page, which POSTs
      the chosen organization back to this view.
    """

    # Login is handled explicitly in get() so the VSTS query parameters can
    # be preserved across the login redirect.
    auth_required = False

    def get(self, request, *args, **kwargs):
        # Build the VSTS account params once instead of three times.
        vsts_params = {
            'targetId': request.GET['targetId'],
            'targetName': request.GET['targetName'],
        }

        if not request.user.is_authenticated():
            configure_uri = '{}?{}'.format(
                reverse('vsts-extension-configuration'),
                urlencode(vsts_params),
            )

            redirect_uri = '{}?{}'.format(
                reverse('sentry-login'),
                urlencode({'next': configure_uri}),
            )

            return self.redirect(redirect_uri)

        # Evaluate the queryset once (the original issued two queries:
        # one for count() and one for the indexing).
        orgs = request.user.get_orgs()

        if orgs.count() == 1:
            pipeline = self.init_pipeline(
                request,
                orgs[0],
                vsts_params['targetId'],
                vsts_params['targetName'],
            )

            return pipeline.current_step()

        return self.redirect('/extensions/vsts/link/?{}'.format(
            urlencode(vsts_params)
        ))

    def post(self, request, *args, **kwargs):
        # Update Integration with the Organization chosen on the link page.
        org = Organization.objects.get(
            slug=request.POST['organization'],
        )

        pipeline = self.init_pipeline(
            request,
            org,
            request.POST['vsts_id'],
            request.POST['vsts_name'],
        )

        return pipeline.current_step()

    def init_pipeline(self, request, organization, id, name):
        """Start the vsts-extension integration pipeline for an org.

        NOTE: the ``id`` parameter name shadows the builtin; kept as-is for
        call compatibility.  ``id``/``name`` are the VSTS account id/name.
        """
        pipeline = IntegrationPipeline(
            request=request,
            organization=organization,
            provider_key='vsts-extension',
        )

        pipeline.initialize()
        pipeline.bind_state('vsts', {
            'AccountId': id,
            'AccountName': name,
        })

        return pipeline
| bsd-3-clause |
x303597316/hue | apps/jobbrowser/src/jobbrowser/yarn_models.py | 14 | 13800 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import time
import urlparse
import urllib2
from lxml import html
from django.utils.translation import ugettext as _
from desktop.lib.rest.resource import Resource
from desktop.lib.view_util import format_duration_in_millis
from hadoop.conf import YARN_CLUSTERS
from hadoop.yarn.clients import get_log_client
from jobbrowser.models import format_unixtime_ms
LOGGER = logging.getLogger(__name__)
class Application(object):
  """Wraps a YARN ResourceManager application dict for the job browser UI.

  Every key in ``attrs`` becomes an instance attribute; ``_fixup`` then
  derives the extra attributes the (MR1-oriented) templates expect.
  """

  def __init__(self, attrs, rm_api=None):
    # rm_api is a ResourceManager API client; only required for kill().
    self.api = rm_api
    for attr in attrs.keys():
      setattr(self, attr, attrs[attr])
    self._fixup()

  def _fixup(self):
    self.is_mr2 = True
    jobid = self.id
    # Completed applications carry the real outcome in finalStatus;
    # running ones only have a meaningful state.
    if self.state in ('FINISHED', 'FAILED', 'KILLED'):
      setattr(self, 'status', self.finalStatus)
    else:
      setattr(self, 'status', self.state)
    setattr(self, 'jobId', jobid)
    setattr(self, 'jobId_short', re.sub('(application|job)_', '', self.jobId))
    setattr(self, 'jobName', self.name)
    # NOTE(review): no-op -- re-assigns applicationType to itself.
    setattr(self, 'applicationType', self.applicationType)
    setattr(self, 'is_retired', False)
    setattr(self, 'maps_percent_complete', self.progress)
    setattr(self, 'reduces_percent_complete', self.progress)
    setattr(self, 'queueName', self.queue)
    setattr(self, 'priority', '')
    # finishedTime == 0 means "still running"; substitute "now" so a
    # duration can still be shown.
    if self.finishedTime == 0:
      finishTime = int(time.time() * 1000)
    else:
      finishTime = self.finishedTime
    setattr(self, 'durationInMillis', finishTime - self.startedTime)
    setattr(self, 'startTimeMs', self.startedTime)
    setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startedTime))
    setattr(self, 'finishTimeFormatted', format_unixtime_ms(finishTime))
    # Task counts are unknown at the application level.
    setattr(self, 'finishedMaps', None)
    setattr(self, 'desiredMaps', None)
    setattr(self, 'finishedReduces', None)
    setattr(self, 'desiredReduces', None)
    setattr(self, 'durationFormatted', format_duration_in_millis(self.durationInMillis))
    # Fields missing from older ResourceManager versions are defaulted.
    for attr in ['preemptedResourceVCores', 'vcoreSeconds', 'memorySeconds', 'diagnostics']:
      if not hasattr(self, attr):
        setattr(self, attr, 'N/A')
    if not hasattr(self, 'acls'):
      setattr(self, 'acls', {})
    # YARN returns a N/A url if it's not set.
    if not hasattr(self, 'trackingUrl') or self.trackingUrl == 'http://N/A':
      self.trackingUrl = None

  def kill(self):
    """Ask the ResourceManager to kill this application."""
    return self.api.kill(self.id)

  def filter_tasks(self, *args, **kwargs):
    # Generic applications expose no task-level breakdown.
    return []
class SparkJob(Application):
  """A Spark application whose metrics are scraped from its tracking UI.

  Spark jobs expose no JSON API via YARN or the app server (see YARN-1530,
  SPARK-1537), so the HTML of the tracking URL is fetched and parsed.
  """

  def __init__(self, job, api=None):
    super(SparkJob, self).__init__(job, api)
    self._scrape()

  def _history_application_metrics(self, html_doc):
    """Extract header/value metric pairs from a History Server page."""
    metrics = []
    root = html.fromstring(html_doc)
    tables = root.findall('.//table')
    # Assumes the metrics live in the third table of the page -- fragile,
    # breaks if the History Server layout changes.
    metrics_table = tables[2].findall('.//tr')
    for tr in metrics_table:
      header = tr.find('.//th')
      value = tr.findall('.//td')
      if value:
        header = header.text.strip().replace(':', '')
        value = value[0].text.strip()
        metrics.append({
          'header': header,
          'value': value
        })
    return metrics

  def _scrape(self):
    # XXX: we have to scrape the tracking URL directly because
    # spark jobs don't have a JSON api via YARN or app server
    # see YARN-1530, SPARK-1537 for progress on these apis
    # NOTE(review): when the request succeeds but trackingUI != 'History',
    # scrapedData ends up without a 'metrics' key -- confirm callers
    # tolerate that.
    self.scrapedData = {}
    try:
      res = urllib2.urlopen(self.trackingUrl)
      html_doc = res.read()
      if self.trackingUI == 'History':
        self.scrapedData['metrics'] = self._history_application_metrics(html_doc)
    except Exception, e:
      # Prevent a nosedive. Don't create metrics if api changes or url is unreachable.
      self.scrapedData['metrics'] = []
class Job(object):
  """Wraps an MR2 job dict from the History Server / Application Master API.

  Attribute access to counters, configuration, tasks and attempts is lazy:
  each property issues an API call on first use and caches the result.
  """

  def __init__(self, api, attrs):
    self.api = api
    self.is_mr2 = True
    for attr in attrs.keys():
      if attr == 'acls':
        # 'acls' are actually not available in the API
        LOGGER.warn('Not using attribute: %s' % attrs[attr])
      else:
        setattr(self, attr, attrs[attr])
    self._fixup()

    # Set MAPS/REDUCES completion percentage
    if hasattr(self, 'mapsTotal'):
      self.desiredMaps = self.mapsTotal
      if self.desiredMaps == 0:
        # Avoid a ZeroDivisionError for map-less jobs.
        self.maps_percent_complete = 0
      else:
        self.maps_percent_complete = int(round(float(self.finishedMaps) / self.desiredMaps * 100))

    if hasattr(self, 'reducesTotal'):
      self.desiredReduces = self.reducesTotal
      if self.desiredReduces == 0:
        self.reduces_percent_complete = 0
      else:
        self.reduces_percent_complete = int(round(float(self.finishedReduces) / self.desiredReduces * 100))

  def _fixup(self):
    # Derive the attribute names the (MR1-era) templates expect.
    jobid = self.id
    setattr(self, 'status', self.state)
    setattr(self, 'jobId', jobid)
    setattr(self, 'jobId_short', self.jobId.replace('job_', ''))
    setattr(self, 'is_retired', False)
    setattr(self, 'maps_percent_complete', None)
    setattr(self, 'reduces_percent_complete', None)
    setattr(self, 'duration', self.finishTime - self.startTime)
    setattr(self, 'durationFormatted', format_duration_in_millis(self.duration))
    setattr(self, 'finishTimeFormatted', format_unixtime_ms(self.finishTime))
    setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
    setattr(self, 'finishedMaps', self.mapsCompleted)
    setattr(self, 'desiredMaps', 0)
    setattr(self, 'finishedReduces', self.reducesCompleted)
    setattr(self, 'desiredReduces', 0)
    setattr(self, 'applicationType', 'MR2')

  def kill(self):
    return self.api.kill(self.id)

  @property
  def counters(self):
    counters = self.api.counters(self.id)
    if counters:
      return counters['jobCounters']
    else:
      return None

  @property
  def acls(self):
    # Derived from the job configuration, since the API itself does not
    # expose ACLs (see the warning in __init__).
    if not hasattr(self, '_acls'):
      self._acls = dict([(name, self.conf_keys[name]) for name in self.conf_keys if 'acl' in name])
    return self._acls

  @property
  def full_job_conf(self):
    if not hasattr(self, '_full_job_conf'):
      self._full_job_conf = self.api.conf(self.id)['conf']
    return self._full_job_conf

  @property
  def conf_keys(self):
    return dict([(line['name'], line['value']) for line in self.full_job_conf['property']])

  def get_task(self, task_id):
    json = self.api.task(self.id, task_id)['task']
    return Task(self, json)

  def filter_tasks(self, task_types=None, task_states=None, task_text=None):
    """Return Tasks matching the given type/state/text filters (all lowercase)."""
    return [Task(self, task) for task in self.api.tasks(self.id).get('tasks', {}).get('task', [])
            if (not task_types or task['type'].lower() in task_types) and
               (not task_states or task['state'].lower() in task_states) and
               (not task_text or task_text.lower() in str(task).lower())]

  @property
  def job_attempts(self):
    if not hasattr(self, '_job_attempts'):
      self._job_attempts = self.api.job_attempts(self.id)['jobAttempts']
    return self._job_attempts
class KilledJob(Job):
  """A killed YARN application presented through the Job interface.

  Killed applications come from the RM applications API rather than the MR
  history server, so the MR-specific fields are missing and defaulted here.
  The _fixup call ordering in __init__ is deliberate and subtle -- see the
  comments below.
  """

  def __init__(self, api, attrs):
    # Pre-seed the MR counters before Job.__init__ copies attrs.  Note
    # that Job.__init__ itself calls self._fixup(), which resolves to
    # *this* class's override (counters only), not Job._fixup.
    self._fixup()

    super(KilledJob, self).__init__(api, attrs)

    # Application dicts use startedTime/finishedTime; map them onto the
    # startTime/finishTime names that Job._fixup expects.
    if not hasattr(self, 'finishTime'):
      setattr(self, 'finishTime', self.finishedTime)
    if not hasattr(self, 'startTime'):
      setattr(self, 'startTime', self.startedTime)

    # Now that the time fields exist, run the full base-class fix-up.
    super(KilledJob, self)._fixup()

    setattr(self, 'jobId_short', self.jobId.replace('application_', ''))

  def _fixup(self):
    # Intentionally does NOT call Job._fixup; __init__ invokes it
    # explicitly once the prerequisites are in place.
    if not hasattr(self, 'mapsCompleted'):
      setattr(self, 'mapsCompleted', 0)
    if not hasattr(self, 'reducesCompleted'):
      setattr(self, 'reducesCompleted', 0)

  @property
  def counters(self):
    # No counters are available for killed applications.
    return {}

  @property
  def full_job_conf(self):
    return {'property': []}

  def filter_tasks(self, task_types=None, task_states=None, task_text=None):
    # Task-level data is unavailable for killed applications.
    return []

  @property
  def job_attempts(self):
    return {'jobAttempt': []}
class Task:
  """A map or reduce task belonging to a Job."""

  def __init__(self, job, attrs):
    self.job = job
    if attrs:
      for key, value in attrs.iteritems():
        setattr(self, key, value)
    self.is_mr2 = True
    self._fixup()

  def _fixup(self):
    setattr(self, 'jobId', self.job.jobId)
    setattr(self, 'taskId', self.id)
    setattr(self, 'taskId_short', self.id)
    setattr(self, 'taskType', self.type)
    setattr(self, 'execStartTimeMs', self.startTime)
    setattr(self, 'mostRecentState', self.state)
    setattr(self, 'execStartTimeFormatted', format_unixtime_ms(self.startTime))
    setattr(self, 'execFinishTimeFormatted', format_unixtime_ms(self.finishTime))
    setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
    # The API reports progress 0-100; the templates expect a 0-1 fraction.
    setattr(self, 'progress', self.progress / 100)

  @property
  def attempts(self):
    # We can cache as we deal with history server
    if not hasattr(self, '_attempts'):
      task_attempts = self.job.api.task_attempts(self.job.id, self.id)['taskAttempts']
      if task_attempts:
        self._attempts = [Attempt(self, attempt) for attempt in task_attempts['taskAttempt']]
      else:
        self._attempts = []
    return self._attempts

  @property
  def taskAttemptIds(self):
    if not hasattr(self, '_taskAttemptIds'):
      self._taskAttemptIds = [attempt.id for attempt in self.attempts]
    return self._taskAttemptIds

  @property
  def counters(self):
    if not hasattr(self, '_counters'):
      self._counters = self.job.api.task_counters(self.jobId, self.id)['jobTaskCounters']
    return self._counters

  def get_attempt(self, attempt_id):
    """Fetch a single attempt of this task by id (no caching)."""
    json = self.job.api.task_attempt(self.jobId, self.id, attempt_id)['taskAttempt']
    return Attempt(self, json)
class Attempt:
  """A single attempt of an MR2 task, including access to its logs."""

  def __init__(self, task, attrs):
    self.task = task
    if attrs:
      for key, value in attrs.iteritems():
        setattr(self, key, value)
    self.is_mr2 = True
    self._fixup()

  def _fixup(self):
    # Derive the attribute names the (MR1-era) templates expect; fields
    # with no YARN equivalent are defaulted to None/''.
    setattr(self, 'attemptId', self.id)
    setattr(self, 'attemptId_short', self.id)
    setattr(self, 'taskTrackerId', getattr(self, 'assignedContainerId', None))
    setattr(self, 'startTimeFormatted', format_unixtime_ms(self.startTime))
    setattr(self, 'finishTimeFormatted', format_unixtime_ms(self.finishTime))
    setattr(self, 'outputSize', None)
    setattr(self, 'phase', None)
    setattr(self, 'shuffleFinishTimeFormatted', None)
    setattr(self, 'sortFinishTimeFormatted', None)
    setattr(self, 'mapFinishTimeFormatted', None)
    # The API reports progress 0-100; the templates expect a 0-1 fraction.
    setattr(self, 'progress', self.progress / 100)
    if not hasattr(self, 'diagnostics'):
      self.diagnostics = ''
    if not hasattr(self, 'assignedContainerId'):
      setattr(self, 'assignedContainerId', '')

  @property
  def counters(self):
    if not hasattr(self, '_counters'):
      self._counters = self.task.job.api.task_attempt_counters(self.task.jobId, self.task.id, self.id)['jobCounters']
    return self._counters

  def get_task_log(self, offset=0):
    """Fetch (stdout, stderr, syslog) for this attempt.

    Logs are scraped from the NodeManager or Job History Server web UI.
    A failed fetch yields an error message in place of that log; the
    result always has exactly three entries.
    """
    logs = []

    attempt = self.task.job.job_attempts['jobAttempt'][-1]
    log_link = attempt['logsLink']

    # Get MR task logs
    # Don't hack up the urls if they've been migrated to the job history server.
    for cluster in YARN_CLUSTERS.get().itervalues():
      if log_link.startswith(cluster.HISTORY_SERVER_API_URL.get()):
        break
    else:
      if self.assignedContainerId:
        log_link = log_link.replace(attempt['containerId'], self.assignedContainerId)
      if hasattr(self, 'nodeHttpAddress'):
        log_link = log_link.replace(attempt['nodeHttpAddress'].split(':')[0], self.nodeHttpAddress.split(':')[0])

    # Pre-bind so the debug block below cannot hit an unbound name when
    # root.get() itself raised before assigning 'response'.
    response = None

    for name in ('stdout', 'stderr', 'syslog'):
      link = '/%s/' % name
      params = {}

      if int(offset) >= 0:
        params['start'] = offset

      try:
        log_link = re.sub('job_[^/]+', self.id, log_link)
        root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
        response = root.get(link, params=params)
        log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
      except Exception as e:
        log = _('Failed to retrieve log: %s' % e)
        try:
          debug_info = '\nLog Link: %s' % log_link
          debug_info += '\nHTML Response: %s' % response
          LOGGER.error(debug_info)
        except Exception:
          # Bug fix: this previously called LOG.exception(), but no LOG
          # name exists in this module (the logger is LOGGER), which
          # turned a logging failure into a NameError.
          LOGGER.exception('failed to build debug info')

      logs.append(log)

    # Pad so callers can always unpack three streams.
    return logs + [''] * (3 - len(logs))
class Container:
  """A YARN container, adapted to the tracker fields the templates expect."""

  def __init__(self, attrs):
    if attrs:
      # The API nests the payload under a 'container' key.
      for key, value in attrs['container'].iteritems():
        setattr(self, key, value)
    self.is_mr2 = True
    self._fixup()

  def _fixup(self):
    setattr(self, 'trackerId', self.id)
    # nodeId has the form 'host:port'.
    setattr(self, 'httpPort', self.nodeId.split(':')[1])
    setattr(self, 'host', self.nodeId.split(':')[0])
    # The remaining MR1 tracker fields have no YARN equivalent; default
    # them so the templates do not blow up.
    setattr(self, 'lastSeenMs', None)
    setattr(self, 'lastSeenFormatted', '')
    setattr(self, 'totalVirtualMemory', None)
    setattr(self, 'totalPhysicalMemory', self.totalMemoryNeededMB)
    setattr(self, 'availableSpace', None)
    setattr(self, 'failureCount', None)
    setattr(self, 'mapCount', None)
    setattr(self, 'reduceCount', None)
    setattr(self, 'maxMapTasks', None)
    setattr(self, 'maxReduceTasks', None)
    setattr(self, 'taskReports', None)
| apache-2.0 |
tsabi/Odoo-tsabi-fixes | addons/hr_payroll/__init__.py | 433 | 1137 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll
import report
import wizard
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
darktears/chromium-crosswalk | native_client_sdk/src/build_tools/generate_make.py | 61 | 8678 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import buildbot_common
import build_version
import getos
from buildbot_common import ErrorExit
from easy_template import RunTemplateFileIfChanged
from build_paths import SDK_RESOURCE_DIR
def Trace(msg):
  """Write a diagnostic line to stderr, but only when tracing is enabled."""
  if not Trace.verbose:
    return
  sys.stderr.write('{0}\n'.format(msg))

# Tracing is off by default; flipped on via the command line elsewhere.
Trace.verbose = False
def IsExample(desc):
  """Return True if the project described by |desc| ships as an example."""
  return desc['DEST'].startswith(('examples', 'tests', 'getting_started'))
def GenerateSourceCopyList(desc):
  """Collect every file that must be copied into the project's directory.

  That is: each target's sources, the DATA files, and -- for examples --
  the shared loader script plus (unless suppressed) the packaging assets.
  """
  # Some examples use their own Makefile/sources/etc.
  if 'TARGETS' not in desc:
    # Only copy the DATA files.
    return desc.get('DATA', [])

  files = []
  # Add sources for each target.
  for target in desc['TARGETS']:
    files += target['SOURCES']

  # And HTML and data files.
  files += desc.get('DATA', [])

  if IsExample(desc):
    files.append('common.js')
    if not desc.get('NO_PACKAGE_FILES'):
      files += ['icon128.png', 'background.js']

  return files
def GetSourcesDict(sources):
  """Bucket |sources| by extension for the extensions we compile.

  Returns a dict with exactly the keys '.c' and '.cc'; files with any
  other extension (headers, data) are dropped.  The original if/else was
  redundant -- both branches assigned the same (possibly empty) list.
  """
  return dict(
      (key, [fname for fname in sources if fname.endswith(key)])
      for key in ['.c', '.cc'])
def GetProjectObjects(source_dict):
  """Return object-file basenames (sources minus extension), .c then .cc."""
  objects = []
  for ext in ['.c', '.cc']:
    objects.extend(os.path.splitext(src)[0] for src in source_dict[ext])
  return objects
def GetPlatforms(plat_list, plat_filter, first_toolchain):
  """Return the platforms in |plat_list| allowed by |plat_filter|.

  The order of |plat_list| is preserved.  When |first_toolchain| is set,
  at most the first match is returned.  Robustness fix: the original
  indexed platforms[0] and raised IndexError when nothing matched; the
  slice returns [] instead.
  """
  platforms = [plat for plat in plat_list if plat in plat_filter]
  if first_toolchain:
    return platforms[:1]
  return platforms
def ErrorMsgFunc(text):
  """Report an error message on stderr (newline-terminated)."""
  sys.stderr.write('{0}\n'.format(text))
def AddMakeBat(pepperdir, makepath):
  """Create a simple batch file to execute Make.

  Creates a simple batch file named make.bat for the Windows platform at the
  given path, pointing to the Make executable in the SDK.

  Args:
    pepperdir: SDK root; |makepath| must live underneath it.
    makepath: Directory in which make.bat is written.
  """
  makepath = os.path.abspath(makepath)
  if not makepath.startswith(pepperdir):
    ErrorExit('Make.bat not relative to Pepper directory: ' + makepath)

  makeexe = os.path.abspath(os.path.join(pepperdir, 'tools'))
  relpath = os.path.relpath(makeexe, makepath)
  outpath = os.path.join(relpath, 'make.exe')

  # Since make.bat is only used by Windows, use Windows path separators.
  outpath = outpath.replace(os.path.sep, '\\')

  # 'with' guarantees the handle is closed even if write() fails (the
  # original leaked the file object on error).
  with open(os.path.join(makepath, 'make.bat'), 'wb') as fp:
    fp.write('@%s %%*\n' % outpath)
def FindFile(name, srcroot, srcdirs):
  """Locate |name| in |srcdirs| (relative to |srcroot|).

  Returns the absolute path of the first match, or None after reporting
  every location that was checked.
  """
  attempted = []
  for srcdir in srcdirs:
    candidate = os.path.abspath(os.path.join(srcroot, srcdir, name))
    if os.path.exists(candidate):
      return candidate
    attempted.append(candidate)
  ErrorMsgFunc('%s not found in:\n\t%s' % (name, '\n\t'.join(attempted)))
  return None
def IsNexe(desc):
  """Return True if any target in |desc| builds a main (nexe) binary."""
  return any(target['TYPE'] == 'main' for target in desc['TARGETS'])
def ProcessHTML(srcroot, dstroot, desc, toolchains, configs, first_toolchain):
  """Instantiate index.html for an example from its template.

  Injects the nmf name, usable toolchains, configs and the nexe path
  pattern as data-* attributes consumed by the example's loader script.
  """
  name = desc['NAME']
  nmf = desc['TARGETS'][0]['NAME']
  outdir = os.path.join(dstroot, desc['DEST'], name)
  srcpath = os.path.join(srcroot, 'index.html')
  dstpath = os.path.join(outdir, 'index.html')

  tools = GetPlatforms(toolchains, desc['TOOLS'], first_toolchain)

  # Literal placeholder expanded client-side by the loader, not by Python.
  path = "{tc}/{config}"

  replace = {
    'title': desc['TITLE'],
    'attrs':
        'data-name="%s" data-tools="%s" data-configs="%s" data-path="%s"' % (
        nmf, ' '.join(tools), ' '.join(configs), path),
  }
  RunTemplateFileIfChanged(srcpath, dstpath, replace)
def GenerateManifest(srcroot, dstroot, desc):
  """Write a Chrome app manifest.json for an example from its template.

  The permission lists from the project description (plain, socket and
  filesystem) are merged into a single JSON-formatted permissions value.
  """
  outdir = os.path.join(dstroot, desc['DEST'], desc['NAME'])
  srcpath = os.path.join(SDK_RESOURCE_DIR, 'manifest.json.template')
  dstpath = os.path.join(outdir, 'manifest.json')
  permissions = desc.get('PERMISSIONS', [])
  combined_permissions = list(permissions)
  # Socket/filesystem permissions are nested objects in the manifest.
  socket_permissions = desc.get('SOCKET_PERMISSIONS', [])
  if socket_permissions:
    combined_permissions.append({'socket': socket_permissions})
  filesystem_permissions = desc.get('FILESYSTEM_PERMISSIONS', [])
  if filesystem_permissions:
    combined_permissions.append({'fileSystem': filesystem_permissions})
  pretty_permissions = json.dumps(combined_permissions,
                                  sort_keys=True, indent=4)
  replace = {
    'name': desc['TITLE'],
    'description': '%s Example' % desc['TITLE'],
    'key': True,
    'channel': None,
    'permissions': pretty_permissions,
    'multi_platform': desc.get('MULTI_PLATFORM', False),
    'version': build_version.ChromeVersionNoTrunk(),
    'min_chrome_version': desc.get('MIN_CHROME_VERSION')
  }
  RunTemplateFileIfChanged(srcpath, dstpath, replace)
def FindAndCopyFiles(src_files, root, search_dirs, dst_dir):
  """Copy each named file into |dst_dir|, searching |search_dirs| under |root|.

  Incremental: a copy is skipped when the destination's mtime is at least
  as new as the source's.  Exits via ErrorExit when a source is missing.
  """
  buildbot_common.MakeDir(dst_dir)
  for src_name in src_files:
    src_file = FindFile(src_name, root, search_dirs)
    if not src_file:
      ErrorExit('Failed to find: ' + src_name)

    dst_file = os.path.join(dst_dir, src_name)
    if os.path.exists(dst_file):
      if os.stat(src_file).st_mtime <= os.stat(dst_file).st_mtime:
        Trace('Skipping "%s", destination "%s" is newer.' % (
            src_file, dst_file))
        continue

    # src_name may contain subdirectories; create them on demand.
    dst_path = os.path.dirname(dst_file)
    if not os.path.exists(dst_path):
      buildbot_common.MakeDir(dst_path)

    buildbot_common.CopyFile(src_file, dst_file)
def ModifyDescInPlace(desc):
  """Perform post-load processing on .dsc file data.

  Currently this consists of:
  - Add -Wall to CXXFLAGS
  """
  for target in desc['TARGETS']:
    flags = target.setdefault('CXXFLAGS', [])
    flags.insert(0, '-Wall')
def ProcessProject(pepperdir, srcroot, dstroot, desc, toolchains, configs=None,
                   first_toolchain=False):
  """Copy a project's files into the SDK tree and generate its Makefile.

  Returns a (name, dest) tuple identifying the processed project.  When
  the description has no TARGETS, only files are copied and no Makefile
  is generated.
  """
  if not configs:
    configs = ['Debug', 'Release']

  name = desc['NAME']
  out_dir = os.path.join(dstroot, desc['DEST'], name)
  buildbot_common.MakeDir(out_dir)
  srcdirs = desc.get('SEARCH', ['.', SDK_RESOURCE_DIR])

  # Copy sources to example directory
  sources = GenerateSourceCopyList(desc)
  FindAndCopyFiles(sources, srcroot, srcdirs, out_dir)

  # Copy public headers to the include directory.
  for headers_set in desc.get('HEADERS', []):
    headers = headers_set['FILES']
    header_out_dir = os.path.join(dstroot, headers_set['DEST'])
    FindAndCopyFiles(headers, srcroot, srcdirs, header_out_dir)

  make_path = os.path.join(out_dir, 'Makefile')

  outdir = os.path.dirname(os.path.abspath(make_path))
  if getos.GetPlatform() == 'win':
    AddMakeBat(pepperdir, outdir)

  # If this project has no TARGETS, then we don't need to generate anything.
  if 'TARGETS' not in desc:
    return (name, desc['DEST'])

  # Examples build a runnable nexe; everything else is a library.
  if IsNexe(desc):
    template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.example.template')
  else:
    template = os.path.join(SDK_RESOURCE_DIR, 'Makefile.library.template')

  # Ensure the order of |tools| is the same as toolchains; that way if
  # first_toolchain is set, it will choose based on the order of |toolchains|.
  tools = [tool for tool in toolchains if tool in desc['TOOLS']]
  if first_toolchain:
    tools = [tools[0]]

  ModifyDescInPlace(desc)

  template_dict = {
    'desc': desc,
    'rel_sdk': '/'.join(['..'] * (len(desc['DEST'].split('/')) + 1)),
    'pre': desc.get('PRE', ''),
    'post': desc.get('POST', ''),
    'tools': tools,
    'sel_ldr': desc.get('SEL_LDR'),
    'targets': desc['TARGETS'],
    'multi_platform': desc.get('MULTI_PLATFORM', False),
  }
  RunTemplateFileIfChanged(template, make_path, template_dict)

  if IsExample(desc):
    ProcessHTML(srcroot, dstroot, desc, toolchains, configs,
                first_toolchain)
    if not desc.get('NO_PACKAGE_FILES'):
      GenerateManifest(srcroot, dstroot, desc)

  return (name, desc['DEST'])
def GenerateMasterMakefile(pepperdir, out_path, targets, deps):
  """Generate a Master Makefile that builds all examples.

  Args:
    pepperdir: NACL_SDK_ROOT
    out_path: Root for output such that out_path+NAME = full path
    targets: List of targets names
    deps: List of dependency names the master Makefile must build first
  """
  in_path = os.path.join(SDK_RESOURCE_DIR, 'Makefile.index.template')
  out_path = os.path.join(out_path, 'Makefile')
  rel_path = os.path.relpath(pepperdir, os.path.dirname(out_path))
  template_dict = {
    'projects': targets,
    'deps' : deps,
    'rel_sdk' : rel_path,
  }
  RunTemplateFileIfChanged(in_path, out_path, template_dict)
  outdir = os.path.dirname(os.path.abspath(out_path))
  # Windows needs a make.bat shim next to the generated Makefile.
  if getos.GetPlatform() == 'win':
    AddMakeBat(pepperdir, outdir)
| bsd-3-clause |
zhengbli/TypeScript-Sublime-Plugin | typescript/libs/logger.py | 5 | 1321 | """
Exposes logging and debugging operations.
Use the 'debug', 'info', 'warning', 'error', or 'critial' methods on the 'log'
object to send messages to the stderr (which appear in the console in Sublime).
A log file is also created in the plugin folder for messages at the level set
by the properties below.
"""
import logging
from os import path
from .global_vars import LOG_CONSOLE_LEVEL, LOG_FILE_LEVEL
# The default path to the log file created for diagnostic output
_pluginRoot = path.dirname(path.dirname(path.abspath(__file__)))
filePath = path.join(_pluginRoot, 'TS.log')

# Root logger for the plugin; the handlers below decide what is emitted.
log = logging.getLogger('TS')
log.setLevel(logging.DEBUG)

_logFormat = logging.Formatter('%(asctime)s: %(thread)d: %(levelname)s: %(message)s')

# File handler: mode='w' truncates the log on every plugin load.
logFile = logging.FileHandler(filePath, mode='w')
logFile.setLevel(logging.DEBUG)
logFile.setFormatter(_logFormat)
log.addHandler(logFile)

# Console handler: messages also appear in Sublime's console via stderr.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(_logFormat)
log.addHandler(console)

log.info('Logging configured to log to file: {0}'.format(filePath))
def view_debug(view, message):
    """Log a debug message tagged with the view's file name (or, for an
    unsaved buffer, its tab name)."""
    filename = view.file_name()
    view_name = view.name()
    # Fall back to the tab name only when the view has no backing file.
    label = view_name if filename is None else filename
    log.debug("%s: %s" % (message, label))
# Re-apply the levels configured in global_vars; these override the
# DEBUG/INFO defaults installed above.
logFile.setLevel(LOG_FILE_LEVEL)
console.setLevel(LOG_CONSOLE_LEVEL)
| apache-2.0 |
armersong/zato | code/alembic/versions/0011_1500abb1cf3_gh184_bring_up_to_date.py | 7 | 12953 | """gh184 Bring Alembic migrations up to date
Revision ID: 0011_1500abb1cf3
Revises: 0010_3f03ae0ef253
Create Date: 2014-04-11 09:25:03.206296
"""
# revision identifiers, used by Alembic.
revision = '0011_1500abb1cf3'
down_revision = '0010_3f03ae0ef253'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import CreateSequence, DropSequence
# Zato
from zato.common.odb import model
from zato.common import CLOUD, HTTP_SOAP_SERIALIZATION_TYPE, MISC, PUB_SUB
# ################################################################################################################################
# Shorthand used repeatedly in upgrade() below.
add_col = op.add_column

def alter_column_nullable_false(table_name, column_name, default_value, column_type):
    """Backfill a column, then add a NOT NULL constraint to it.

    Note: the UPDATE has no WHERE clause, so default_value is written to
    *every* existing row (not just NULL ones) before the column is made
    NOT NULL.
    """
    column = sa.sql.table(table_name, sa.sql.column(column_name))
    op.execute(column.update().values({column_name:default_value}))
    op.alter_column(table_name, column_name, type_=column_type, existing_type=column_type, nullable=False)
def upgrade():
    """Apply every schema change accumulated since revision 0010 (GH-184)."""
    # Usernames may now repeat across security types within a cluster; the
    # uniqueness guarantee moves to the (cluster, username, sec_type) triple.
    op.create_unique_constraint(
        'sec_base_cluster_id_username_sec_type_key', model.SecurityBase.__tablename__, ['cluster_id', 'username', 'sec_type'])

    # New security-definition subtypes use joined-table inheritance off
    # sec_base: the PK of each child table is also an FK to the parent row.
    op.create_table(
        model.NTLM.__tablename__, sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True)
    )
    op.create_table(
        model.APIKeySecurity.__tablename__, sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True)
    )
    op.create_table(
        model.XPathSecurity.__tablename__,
        sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True),
        sa.Column('username_expr', sa.String(200), nullable=False),
        sa.Column('password_expr', sa.String(200), nullable=True),
    )

    # New http_soap columns are added NULLable, backfilled with defaults and
    # only then made NOT NULL so the migration works on populated tables.
    add_col(
        model.HTTPSOAP.__tablename__, sa.Column('serialization_type', sa.String(200), nullable=True))
    alter_column_nullable_false(
        model.HTTPSOAP.__tablename__, 'serialization_type', HTTP_SOAP_SERIALIZATION_TYPE.SUDS.id, sa.String(200))
    add_col(
        model.HTTPSOAP.__tablename__, sa.Column('timeout', sa.Integer(), nullable=True))
    alter_column_nullable_false(
        model.HTTPSOAP.__tablename__, 'timeout', MISC.DEFAULT_HTTP_TIMEOUT, sa.Integer())

    op.create_table(
        model.AWSSecurity.__tablename__, sa.Column('id', sa.Integer(), sa.ForeignKey('sec_base.id'), primary_key=True))

    # Sequences are created explicitly so the migration also works on
    # backends (e.g. PostgreSQL/Oracle) where tables use sequence-backed PKs.
    op.execute(CreateSequence(sa.Sequence('deliv_def_seq')))
    op.execute(CreateSequence(sa.Sequence('deliv_seq')))
    op.execute(CreateSequence(sa.Sequence('deliv_payl_seq')))
    op.execute(CreateSequence(sa.Sequence('msg_ns_seq')))
    op.execute(CreateSequence(sa.Sequence('http_soap_audit_seq')))

    # Audit log of HTTP/SOAP invocations.
    op.create_table(
        model.HTTSOAPAudit.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('http_soap_audit_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False, index=True),
        sa.Column('cid', sa.String(200), nullable=False, index=True),
        sa.Column('transport', sa.String(200), nullable=False, index=True),
        sa.Column('connection', sa.String(200), nullable=False, index=True),
        sa.Column('req_time', sa.DateTime(), nullable=False),
        sa.Column('resp_time', sa.DateTime(), nullable=True),
        sa.Column('user_token', sa.String(200), nullable=True, index=True),
        sa.Column('invoke_ok', sa.Boolean, nullable=True),
        sa.Column('auth_ok', sa.Boolean, nullable=True),
        sa.Column('remote_addr', sa.String(200), nullable=False, index=True),
        sa.Column('req_headers', sa.LargeBinary(), nullable=True),
        sa.Column('req_payload', sa.LargeBinary(), nullable=True),
        sa.Column('resp_headers', sa.LargeBinary(), nullable=True),
        sa.Column('resp_payload', sa.LargeBinary(), nullable=True),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.Column('conn_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
    )

    # Audit replace-patterns: JSON Pointer and XPath variants.
    op.execute(CreateSequence(sa.Sequence('htp_sp_ad_rpl_p_jp_seq')))
    op.create_table(
        model.HTTSOAPAuditReplacePatternsJSONPointer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('htp_sp_ad_rpl_p_jp_seq'), primary_key=True),
        sa.Column('conn_id', sa.Integer, sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
        sa.Column('pattern_id', sa.Integer, sa.ForeignKey('msg_json_pointer.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer, sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('conn_id', 'pattern_id')
    )
    op.execute(CreateSequence(sa.Sequence('htp_sp_ad_rpl_p_xp_seq')))
    op.create_table(
        model.HTTSOAPAuditReplacePatternsXPath.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('htp_sp_ad_rpl_p_xp_seq'), primary_key=True),
        sa.Column('conn_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=False),
        sa.Column('pattern_id', sa.Integer(), sa.ForeignKey('msg_xpath.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('conn_id', 'pattern_id')
    )

    # Publish/subscribe: topics, consumers, producers.
    op.execute(CreateSequence(sa.Sequence('pub_sub_topic_seq')))
    op.create_table(
        model.PubSubTopic.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_topic_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('max_depth', sa.Integer(), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name', 'cluster_id')
    )
    op.execute(CreateSequence(sa.Sequence('pub_sub_cons_seq')))
    op.create_table(
        model.PubSubConsumer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_cons_seq'), primary_key=True),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('sub_key', sa.String(200), nullable=False),
        sa.Column('max_backlog', sa.Integer(), nullable=False),
        sa.Column('delivery_mode', sa.String(200), nullable=False),
        sa.Column('callback_id', sa.Integer(), sa.ForeignKey('http_soap.id', ondelete='CASCADE'), nullable=True),
        sa.Column('callback_type', sa.String(20), nullable=True, default=PUB_SUB.CALLBACK_TYPE.OUTCONN_PLAIN_HTTP),
        sa.Column('topic_id', sa.Integer(), sa.ForeignKey('pub_sub_topic.id', ondelete='CASCADE'), nullable=False),
        sa.Column('sec_def_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('sec_def_id', 'topic_id', 'cluster_id')
    )
    # NOTE(review): PubSubProducer reuses 'pub_sub_cons_seq' for its PK —
    # looks copied from PubSubConsumer; confirm this sharing is intended.
    op.create_table(
        model.PubSubProducer.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('pub_sub_cons_seq'), primary_key=True),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('topic_id', sa.Integer(), sa.ForeignKey('pub_sub_topic.id', ondelete='CASCADE'), nullable=False),
        sa.Column('sec_def_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('sec_def_id', 'topic_id', 'cluster_id')
    )

    # Cloud connections: OpenStack Swift and AWS S3.
    op.execute(CreateSequence(sa.Sequence('os_swift_seq')))
    op.create_table(
        model.OpenStackSwift.__tablename__,
        sa.Column('id', sa.Integer(), sa.Sequence('os_swift_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('auth_url', sa.String(200), nullable=False),
        sa.Column('auth_version', sa.String(200), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.AUTH_VERSION),
        sa.Column('user', sa.String(200), nullable=True),
        sa.Column('key', sa.String(200), nullable=True),
        sa.Column('retries', sa.Integer, nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.RETRIES),
        sa.Column('is_snet', sa.Boolean, nullable=False),
        sa.Column('starting_backoff', sa.Integer(), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.BACKOFF_STARTING),
        sa.Column('max_backoff', sa.Integer(), nullable=False, default=CLOUD.OPENSTACK.SWIFT.DEFAULTS.BACKOFF_MAX),
        sa.Column('tenant_name', sa.String(200), nullable=True),
        sa.Column('should_validate_cert', sa.Boolean, nullable=False),
        sa.Column('cacert', sa.String(200), nullable=True),
        sa.Column('should_retr_ratelimit', sa.Boolean, nullable=False),
        sa.Column('needs_tls_compr', sa.Boolean, nullable=False),
        sa.Column('custom_options', sa.String(2000), nullable=True),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name', 'cluster_id')
    )
    op.execute(CreateSequence(sa.Sequence('aws_s3_seq')))
    op.create_table(
        model.AWSS3.__tablename__,
        sa.Column('id', sa.Integer, sa.Sequence('aws_s3_seq'), primary_key=True),
        sa.Column('name', sa.String(200), nullable=False),
        sa.Column('is_active', sa.Boolean, nullable=False),
        sa.Column('pool_size', sa.Integer(), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.POOL_SIZE),
        sa.Column('address', sa.String(200), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.ADDRESS),
        sa.Column('debug_level', sa.Integer, nullable=False, default=CLOUD.AWS.S3.DEFAULTS.DEBUG_LEVEL),
        sa.Column('suppr_cons_slashes', sa.Boolean, nullable=False, default=True),
        sa.Column('content_type', sa.String(200), nullable=False, default=CLOUD.AWS.S3.DEFAULTS.CONTENT_TYPE),
        sa.Column('metadata_', sa.String(2000), nullable=True),
        sa.Column('bucket', sa.String(2000), nullable=True),
        sa.Column('encrypt_at_rest', sa.Boolean, nullable=False, default=False),
        sa.Column('storage_class', sa.String(200), nullable=False, default=CLOUD.AWS.S3.STORAGE_CLASS.DEFAULT),
        sa.Column('security_id', sa.Integer(), sa.ForeignKey('sec_base.id', ondelete='CASCADE'), nullable=False),
        sa.Column('cluster_id', sa.Integer(), sa.ForeignKey('cluster.id', ondelete='CASCADE'), nullable=False),
        sa.UniqueConstraint('name', 'cluster_id')
    )

    # ODB connection details on a cluster are now optional.
    op.alter_column(model.Cluster.__tablename__, 'odb_host', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_port', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_user', nullable=True)
    op.alter_column(model.Cluster.__tablename__, 'odb_db_name', nullable=True)
def downgrade():
    """Undo :func:`upgrade`: drop the tables/sequences it created and restore
    the previous column constraints. Tables are dropped before the sequences
    backing their primary keys."""
    op.drop_constraint('sec_base_cluster_id_username_sec_type_key', model.SecurityBase.__tablename__)
    op.drop_table(model.NTLM.__tablename__)
    op.drop_table(model.AWSSecurity.__tablename__)
    op.drop_table(model.APIKeySecurity.__tablename__)
    op.drop_table(model.XPathSecurity.__tablename__)
    op.execute(DropSequence(sa.Sequence('deliv_payl_seq')))
    op.execute(DropSequence(sa.Sequence('msg_ns_seq')))
    op.drop_table(model.HTTSOAPAudit.__tablename__)
    op.execute(DropSequence(sa.Sequence('http_soap_audit_seq')))
    op.drop_table(model.HTTSOAPAuditReplacePatternsJSONPointer.__tablename__)
    op.execute(DropSequence(sa.Sequence('htp_sp_ad_rpl_p_jp_seq')))
    op.drop_table(model.HTTSOAPAuditReplacePatternsXPath.__tablename__)
    op.execute(DropSequence(sa.Sequence('htp_sp_ad_rpl_p_xp_seq')))
    # 'pub_sub_cons_seq' is shared by both consumer and producer tables (see
    # upgrade()), hence only one DropSequence for the pair.
    op.drop_table(model.PubSubConsumer.__tablename__)
    op.execute(DropSequence(sa.Sequence('pub_sub_cons_seq')))
    op.drop_table(model.PubSubProducer.__tablename__)
    op.drop_table(model.OpenStackSwift.__tablename__)
    op.execute(DropSequence(sa.Sequence('os_swift_seq')))
    op.drop_table(model.AWSS3.__tablename__)
    op.execute(DropSequence(sa.Sequence('aws_s3_seq')))
    op.execute(DropSequence(sa.Sequence('deliv_seq')))
    op.drop_table(model.PubSubTopic.__tablename__)
    op.execute(DropSequence(sa.Sequence('pub_sub_topic_seq')))
    op.execute(DropSequence(sa.Sequence('deliv_def_seq')))
    op.drop_column(model.HTTPSOAP.__tablename__, 'serialization_type')
    op.drop_column(model.HTTPSOAP.__tablename__, 'timeout')
    # Restore the previous NOT NULL constraints on cluster ODB columns.
    op.alter_column(model.Cluster.__tablename__, 'odb_host', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_port', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_user', nullable=False)
    op.alter_column(model.Cluster.__tablename__, 'odb_db_name', nullable=False)
| gpl-3.0 |
ganeshnalawade/ansible-modules-core | network/nxos/nxos_udld_interface.py | 5 | 15769 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_udld_interface
version_added: "2.2"
short_description: Manages UDLD interface configuration params.
description:
- Manages UDLD interface configuration params.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- Feature UDLD must be enabled on the device to use this module.
options:
mode:
description:
- Manages UDLD mode for an interface.
required: true
choices: ['enabled','disabled','aggressive']
interface:
description:
- FULL name of the interface, i.e. Ethernet1/1-
required: true
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure Ethernet1/1 is configured to be in aggressive mode
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Remove the aggressive config only if it's currently in aggressive mode and then disable udld (switch default)
- nxos_udld_interface:
interface: Ethernet1/1
mode: aggressive
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure Ethernet1/1 has aggressive mode enabled
- nxos_udld_interface:
interface: Ethernet1/1
mode: enabled
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"mode": "enabled"}
existing:
description:
- k/v pairs of existing configuration
type: dict
sample: {"mode": "aggressive"}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"mode": "enabled"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface ethernet1/33",
"no udld aggressive ; no udld disable"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Normalise *val* to a list: lists/tuples are copied, ``None`` becomes
    an empty list, and any other value is wrapped in a one-element list."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig with section-aware lookups and hierarchy-aware adds,
    used by the migrated nxos modules."""

    def expand_section(self, configobj, S=None):
        # Depth-first collection of configobj and all of its descendants;
        # S doubles as the visited set so shared children are added once.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S

    def get_object(self, path):
        # Find the ConfigLine whose text is path[-1] and whose parent texts
        # match path[:-1] exactly; returns None when not found.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item

    def to_block(self, section):
        # Join raw (indented) lines back into one config block string.
        return '\n'.join([item.raw for item in section])

    def get_section(self, path):
        # Returns the section as one string, or an empty list when the path
        # does not exist (callers treat both as falsy).
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()

    def get_section_objects(self, path):
        # :raises ValueError: when the path is not present in the config.
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)

    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """
        ancestors = list()
        offset = 0
        obj = None

        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            # Walk/create the parent chain first, indenting each level by
            # self.indent, then attach the new lines as children of the
            # deepest parent (skipping children that already exist).
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)

            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    # Prefer the legacy get_module factory; when only NetworkModule could be
    # imported at the top of the file, get_module is undefined and the
    # NameError routes us to the newer API.
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    # Return the device's running config wrapped in CustomNetworkConfig.
    # NOTE(review): the include_defaults parameter is unused; the value is
    # taken from module.params instead — confirm this is intentional.
    config = module.params['config']
    if not config:
        try:
            # Older module API.
            config = module.get_config()
        except AttributeError:
            # Newer API exposes config retrieval via module.config.
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    # Push the difference between *candidate* and the device's current config
    # to the device; honours check mode and the 'save' module parameter.
    # Returns {'changed': bool[, 'updates': list-of-commands]}.
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]

    save_config = module.params['save']

    result = dict(changed=False)

    if commands:
        if not module.check_mode:
            # Old vs. new module API for sending config lines.
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)

            if save_config:
                # Old vs. new API for persisting to startup-config.
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])

        # Reported as changed even in check mode, since commands would run.
        result['changed'] = True
        result['updates'] = commands

    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    # Send config-mode commands to the device, supporting both the legacy
    # module.configure API and the newer module.cli API; any transport error
    # fails the Ansible task with the offending commands attached.
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        # Newer API: enter config mode explicitly, then run the batch.
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
    """Normalise a CLI (ssh) response into the body format NX-API would give.

    This is kind of a hack, kept from the NX-API origins of these modules:
    not every command supports "| json" over cli/ssh. An XML-looking reply
    (or a bare newline) means the command was valid but the resource does
    not exist yet, so an empty body is returned. 'show run' output is raw
    text and passed through untouched; anything else must parse as JSON or
    the task is failed.
    """
    first_line = response[0]
    if 'xml' in first_line or first_line == '\n':
        return []
    if 'show run' in command:
        return response
    try:
        return [json.loads(first_line)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
def execute_show(cmds, module, command_type=None):
    # Run show commands via whichever transport API the module object
    # exposes: module.execute (legacy) or module.cli (newer). The legacy
    # command_type names are mapped to the newer output formats.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }

    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # Legacy API not available; fall back to module.cli.
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    # Dispatch a show command according to the configured transport and
    # normalise the result into a parsed body list.
    # NOTE(review): `body` would be unbound for a transport other than
    # 'cli'/'nxapi'; presumably the argument spec guarantees one of the two.
    if module.params['transport'] == 'cli':
        # Over ssh we must request JSON ourselves ('show run' stays raw).
        if 'show run' not in command:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)

    return body
def flatten_list(command_lists):
    """Flatten a mixed sequence of commands and command lists by one level."""
    flattened = []
    for entry in command_lists:
        part = entry if isinstance(entry, list) else [entry]
        flattened.extend(part)
    return flattened
def get_udld_interface(module, interface):
    # Query the device for the UDLD state of one interface and reduce it to
    # {'mode': 'aggressive'|<mib-port-status>}; returns {} when the output
    # cannot be parsed (e.g. the interface has no UDLD data).
    command = 'show udld {0}'.format(interface)
    interface_udld = {}
    mode = None
    try:
        body = execute_show_command(command, module)[0]
        table = body['TABLE_interface']['ROW_interface']

        status = str(table.get('mib-port-status', None))
        # Note: 'mib-aggresive-mode' is NOT a typo
        agg = str(table.get('mib-aggresive-mode', 'disabled'))

        # Aggressive mode wins over the plain enabled/disabled status.
        if agg == 'enabled':
            mode = 'aggressive'
        else:
            mode = status

        interface_udld['mode'] = mode

    except (KeyError, AttributeError, IndexError):
        interface_udld = {}

    return interface_udld
def is_interface_copper(module, interface):
    """Return True when *interface* (lower-cased name) shows up as a copper
    port in 'show interface status'; UDLD commands differ between copper
    and fiber ports. Parsing problems are treated as "not copper"."""
    command = 'show interface status'
    copper_ports = []
    try:
        body = execute_show_command(command, module)[0]
        rows = body['TABLE_interface']['ROW_interface']
        for row in rows:
            port_type = row.get('type', 'DNE')
            # Copper heuristics: 'CU', 1000-base-T variants, 10GBaseT.
            if 'CU' in port_type or '1000' in port_type or '10GBaseT' in port_type:
                copper_ports.append(str(row['interface'].lower()))
    except (KeyError, AttributeError):
        pass

    return interface in copper_ports
def get_commands_config_udld_interface(delta, interface, module, existing):
    """Build the CLI needed to apply the UDLD mode in *delta* to *interface*.

    Copper and fiber ports use different command sets ('udld enable' vs.
    'no udld disable'), and leaving aggressive mode requires removing it
    explicitly first.

    :param delta: dict with the desired 'mode' (may be empty/None)
    :param existing: current state from get_udld_interface() (may be {})
    :returns: ['interface <name>', <mode command>] or [] when no change
    """
    commands = []
    # Fix: command must be initialised — it was previously unbound (NameError)
    # when delta was falsy or no branch below matched.
    command = None
    copper = is_interface_copper(module, interface)
    if delta:
        mode = delta['mode']
        if mode == 'aggressive':
            command = 'udld aggressive'
        elif copper:
            if mode == 'enabled':
                # .get: existing may be {} when the device reported no state.
                if existing.get('mode') == 'aggressive':
                    command = 'no udld aggressive ; udld enable'
                else:
                    command = 'udld enable'
            elif mode == 'disabled':
                command = 'no udld enable'
        elif not copper:
            if mode == 'enabled':
                if existing.get('mode') == 'aggressive':
                    command = 'no udld aggressive ; no udld disable'
                else:
                    command = 'no udld disable'
            elif mode == 'disabled':
                command = 'udld disable'
    if command:
        commands.append(command)
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def get_commands_remove_udld_interface(delta, interface, module, existing):
    """Build the CLI needed to remove the UDLD mode in *delta* from
    *interface*, returning the port to its platform default (copper and
    fiber defaults differ).

    :param delta: dict with the 'mode' to remove (may be empty/None)
    :param existing: current state from get_udld_interface() (unused here,
        kept for signature parity with the config variant)
    :returns: ['interface <name>', <mode command>] or [] when no change
    """
    commands = []
    # Fix: command must be initialised — it was previously unbound (NameError)
    # when delta was falsy or no branch below matched.
    command = None
    copper = is_interface_copper(module, interface)
    if delta:
        mode = delta['mode']
        if mode == 'aggressive':
            command = 'no udld aggressive'
        elif copper:
            if mode == 'enabled':
                command = 'no udld enable'
            elif mode == 'disabled':
                command = 'udld enable'
        elif not copper:
            if mode == 'enabled':
                command = 'udld disable'
            elif mode == 'disabled':
                command = 'no udld disable'
    if command:
        commands.append(command)
        commands.insert(0, 'interface {0}'.format(interface))
    return commands
def main():
    # Entry point: compute the delta between the requested UDLD mode and the
    # device's current mode, apply (or remove) it, and report before/after
    # state. Note: iteritems() makes this Python-2 only, consistent with the
    # rest of the file.
    argument_spec = dict(
        mode=dict(choices=['enabled', 'disabled', 'aggressive'],
                  required=True),
        interface=dict(type='str', required=True),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)

    interface = module.params['interface'].lower()
    mode = module.params['mode']
    state = module.params['state']

    proposed = dict(mode=mode)
    existing = get_udld_interface(module, interface)
    end_state = existing

    # Keys/values requested but not currently configured.
    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))

    changed = False
    commands = []
    if state == 'present':
        if delta:
            command = get_commands_config_udld_interface(delta, interface,
                                                         module, existing)
            commands.append(command)
    elif state == 'absent':
        # Only remove settings that are actually present on the device.
        common = set(proposed.iteritems()).intersection(existing.iteritems())
        if common:
            command = get_commands_remove_udld_interface(
                dict(common), interface, module, existing
            )
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_udld_interface(module, interface)
            # Strip the implicit 'configure' prefix from the reported cmds.
            if 'configure' in cmds:
                cmds.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed

    module.exit_json(**results)
# Run the module when invoked directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
deberon/ansible-modules-extras | packaging/os/layman.py | 41 | 6970 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import shutil
from os import path
from urllib2 import Request, urlopen, URLError
DOCUMENTATION = '''
---
module: layman
author: Jakub Jirutka
version_added: "1.6"
short_description: Manage Gentoo overlays
description:
- Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux.
Please note that Layman must be installed on a managed node prior using this module.
options:
name:
description:
- The overlay id to install, synchronize, or uninstall.
Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
required: true
list_url:
description:
- An URL of the alternative overlays list that defines the overlay to install.
This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where
C(overlay_defs) is readed from the Layman's configuration.
required: false
state:
description:
- Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
required: false
default: present
choices: [present, absent, updated]
'''
EXAMPLES = '''
# Install the overlay 'mozilla' which is on the central overlays list.
- layman: name=mozilla
# Install the overlay 'cvut' from the specified alternative list.
- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
# Update (sync) the overlay 'cvut', or install if not installed yet.
- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
# Update (sync) all of the installed overlays.
- layman: name=ALL state=updated
# Uninstall the overlay 'cvut'.
- layman: name=cvut state=absent
'''
USERAGENT = 'ansible-httpget'
try:
from layman.api import LaymanAPI
from layman.config import BareConfig
HAS_LAYMAN_API = True
except ImportError:
HAS_LAYMAN_API = False
class ModuleError(Exception): pass
def init_layman(config=None):
    """Return an initialized ``LaymanAPI``.

    :param config: the layman configuration to use; when omitted, a quiet
        ``BareConfig`` read from the standard config file is created.
    """
    if config is None:
        config = BareConfig(read_configfile=True, quietness=1)
    return LaymanAPI(config)
def download_url(url, dest):
    '''Fetch *url* and write the response body to *dest*.

    Python 2 only (urllib2 and the ``except X, e`` syntax), consistent with
    the rest of this module.

    :param url: the URL to download
    :param dest: the absolute path of where to save the downloaded content to;
                 it must be writable and not a directory
    :raises ModuleError: on any network or filesystem failure
    '''
    request = Request(url)
    # Identify ourselves; some servers reject requests without a User-agent.
    request.add_header('User-agent', USERAGENT)

    try:
        response = urlopen(request)
    except URLError, e:
        raise ModuleError("Failed to get %s: %s" % (url, str(e)))

    try:
        with open(dest, 'w') as f:
            shutil.copyfileobj(response, f)
    except IOError, e:
        raise ModuleError("Failed to write: %s" % str(e))
def install_overlay(name, list_url=None):
    '''Installs the overlay repository. If not on the central overlays list,
    then :list_url of an alternative list must be provided. The list will be
    fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the
    ``overlay_defs`` is read from the Layman's configuration).

    :param name: the overlay id
    :param list_url: the URL of the remote repositories list to look for the overlay
                     definition (optional, default: None)
    :returns: True if the overlay was installed, or False if already exists
              (i.e. nothing has changed)
    :raises ModuleError
    '''
    # read Layman configuration
    layman_conf = BareConfig(read_configfile=True)
    layman = init_layman(layman_conf)

    if layman.is_installed(name):
        return False

    if not layman.is_repo(name):
        # Unknown overlay: fetch the alternative list so Layman can find it.
        if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \
            "overlays and URL of the remote list was not provided." % name)

        overlay_defs = layman_conf.get_option('overlay_defs')
        dest = path.join(overlay_defs, name + '.xml')

        download_url(list_url, dest)

        # reload config, so the freshly saved list is picked up
        layman = init_layman()

    if not layman.add_repos(name): raise ModuleError(layman.get_errors())

    return True
def uninstall_overlay(name):
    """Uninstall the given overlay repository from the system.

    :param name: the overlay id to uninstall
    :returns: True if the overlay was uninstalled, or False if it was not
        installed in the first place (i.e. nothing has changed)
    :raises ModuleError
    """
    layman = init_layman()

    if not layman.is_installed(name):
        return False

    layman.delete_repos(name)
    errors = layman.get_errors()
    if errors:
        raise ModuleError(errors)

    return True
def sync_overlay(name):
    '''Synchronizes the specified overlay repository.

    :param name: the overlay repository id to sync
    :raises ModuleError: with the per-repo error messages collected from
        layman.sync_results
    '''
    layman = init_layman()

    if not layman.sync(name):
        # sync_results[2] holds (repo, message) failure tuples.
        messages = [ str(item[1]) for item in layman.sync_results[2] ]
        raise ModuleError(messages)
def sync_overlays():
    """Synchronize every installed overlay.

    :raises ModuleError
    """
    layman = init_layman()
    for overlay_id in layman.get_installed():
        sync_overlay(overlay_id)
def main():
    # Module entry point: dispatch on 'state'. Python 2 only (the
    # ``except X, e`` syntax below), consistent with the rest of the file.
    # define module
    module = AnsibleModule(
        argument_spec = {
            'name': { 'required': True },
            'list_url': { 'aliases': ['url'] },
            'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] },
        }
    )

    if not HAS_LAYMAN_API:
        module.fail_json(msg='Layman is not installed')

    state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])

    changed = False
    try:
        if state == 'present':
            changed = install_overlay(name, url)

        elif state == 'updated':
            if name == 'ALL':
                sync_overlays()
            elif install_overlay(name, url):
                # Freshly installed counts as updated; no sync needed.
                changed = True
            else:
                sync_overlay(name)
        else:
            changed = uninstall_overlay(name)

    except ModuleError, e:
        module.fail_json(msg=e.message)
    else:
        module.exit_json(changed=changed, name=name)
# import module snippets
# The wildcard import supplies AnsibleModule (standard pattern for
# pre-2.1 Ansible modules); main() runs unconditionally as the entry point.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
azaghal/ansible | lib/ansible/cli/config.py | 47 | 7576 | # Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import shlex
import subprocess
import yaml
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.config.manager import ConfigManager, Setting, find_ini_config_file
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
class ConfigCLI(CLI):
""" Config command line class """
    def __init__(self, args, callback=None):
        """Set up empty config state; the actual config file/manager are
        resolved later in run()."""
        self.config_file = None
        self.config = None
        super(ConfigCLI, self).__init__(args, callback)
    def init_parser(self):
        # Build the argument parser: shared -v/-c options plus one
        # subcommand per action, each bound to its execute_* method via
        # set_defaults(func=...).
        super(ConfigCLI, self).init_parser(
            desc="View ansible configuration.",
        )

        common = opt_help.argparse.ArgumentParser(add_help=False)
        opt_help.add_verbosity_options(common)
        common.add_argument('-c', '--config', dest='config_file',
                            help="path to configuration file, defaults to first file found in precedence.")

        subparsers = self.parser.add_subparsers(dest='action')
        subparsers.required = True

        list_parser = subparsers.add_parser('list', help='Print all config options', parents=[common])
        list_parser.set_defaults(func=self.execute_list)

        dump_parser = subparsers.add_parser('dump', help='Dump configuration', parents=[common])
        dump_parser.set_defaults(func=self.execute_dump)
        dump_parser.add_argument('--only-changed', dest='only_changed', action='store_true',
                                 help="Only show configurations that have changed from the default")

        view_parser = subparsers.add_parser('view', help='View configuration file', parents=[common])
        view_parser.set_defaults(func=self.execute_view)

        # Planned subcommands, not yet implemented (see execute_update below):
        # update_parser = subparsers.add_parser('update', help='Update configuration option')
        # update_parser.set_defaults(func=self.execute_update)
        # update_parser.add_argument('-s', '--setting', dest='setting',
        #                            help="config setting, the section defaults to 'defaults'",
        #                            metavar='[section.]setting=value')

        # search_parser = subparsers.add_parser('search', help='Search configuration')
        # search_parser.set_defaults(func=self.execute_search)
        # search_parser.add_argument('args', help='Search term', metavar='<search term>')
    def post_process_args(self, options):
        """Propagate the parsed verbosity to the global display object."""
        options = super(ConfigCLI, self).post_process_args(options)
        display.verbosity = options.verbosity

        return options
def run(self):
super(ConfigCLI, self).run()
if context.CLIARGS['config_file']:
self.config_file = unfrackpath(context.CLIARGS['config_file'], follow=False)
b_config = to_bytes(self.config_file)
if os.path.exists(b_config) and os.access(b_config, os.R_OK):
self.config = ConfigManager(self.config_file)
else:
raise AnsibleOptionsError('The provided configuration file is missing or not accessible: %s' % to_native(self.config_file))
else:
self.config = ConfigManager()
self.config_file = find_ini_config_file()
if self.config_file:
try:
if not os.path.exists(self.config_file):
raise AnsibleOptionsError("%s does not exist or is not accessible" % (self.config_file))
elif not os.path.isfile(self.config_file):
raise AnsibleOptionsError("%s is not a valid file" % (self.config_file))
os.environ['ANSIBLE_CONFIG'] = to_native(self.config_file)
except Exception:
if context.CLIARGS['action'] in ['view']:
raise
elif context.CLIARGS['action'] in ['edit', 'update']:
display.warning("File does not exist, used empty file: %s" % self.config_file)
elif context.CLIARGS['action'] == 'view':
raise AnsibleError('Invalid or no config file was supplied')
context.CLIARGS['func']()
def execute_update(self):
'''
Updates a single setting in the specified ansible.cfg
'''
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
if context.CLIARGS['setting'] is None:
raise AnsibleOptionsError("update option requires a setting to update")
(entry, value) = context.CLIARGS['setting'].split('=')
if '.' in entry:
(section, option) = entry.split('.')
else:
section = 'defaults'
option = entry
subprocess.call([
'ansible',
'-m', 'ini_file',
'localhost',
'-c', 'local',
'-a', '"dest=%s section=%s option=%s value=%s backup=yes"' % (self.config_file, section, option, value)
])
def execute_view(self):
'''
Displays the current config file
'''
try:
with open(self.config_file, 'rb') as f:
self.pager(to_text(f.read(), errors='surrogate_or_strict'))
except Exception as e:
raise AnsibleError("Failed to open config file: %s" % to_native(e))
def execute_edit(self):
'''
Opens ansible.cfg in the default EDITOR
'''
raise AnsibleError("Option not implemented yet")
# pylint: disable=unreachable
try:
editor = shlex.split(os.environ.get('EDITOR', 'vi'))
editor.append(self.config_file)
subprocess.call(editor)
except Exception as e:
raise AnsibleError("Failed to open editor: %s" % to_native(e))
def execute_list(self):
'''
list all current configs reading lib/constants.py and shows env and config file setting names
'''
self.pager(to_text(yaml.dump(self.config.get_configuration_definitions(), Dumper=AnsibleDumper), errors='surrogate_or_strict'))
def execute_dump(self):
'''
Shows the current settings, merges ansible.cfg if specified
'''
# FIXME: deal with plugins, not just base config
text = []
defaults = self.config.get_configuration_definitions().copy()
for setting in self.config.data.get_settings():
if setting.name in defaults:
defaults[setting.name] = setting
for setting in sorted(defaults):
if isinstance(defaults[setting], Setting):
if defaults[setting].origin == 'default':
color = 'green'
else:
color = 'yellow'
msg = "%s(%s) = %s" % (setting, defaults[setting].origin, defaults[setting].value)
else:
color = 'green'
msg = "%s(%s) = %s" % (setting, 'default', defaults[setting].get('default'))
if not context.CLIARGS['only_changed'] or color == 'yellow':
text.append(stringc(msg, color))
self.pager(to_text('\n'.join(text), errors='surrogate_or_strict'))
| gpl-3.0 |
abagh0703/RetailTrail | flask/lib/python2.7/site-packages/setuptools/command/develop.py | 477 | 6447 | from setuptools.command.easy_install import easy_install
from distutils.util import convert_path, subst_vars
from pkg_resources import Distribution, PathMetadata, normalize_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os, sys, setuptools, glob
class develop(easy_install):
    """Set up package for development"""
    description = "install package in 'development mode'"
    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]
    boolean_options = easy_install.boolean_options + ['uninstall']
    command_consumes_arguments = False # override base
    def run(self):
        # -u/--uninstall removes the .egg-link; otherwise install in place.
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
        else:
            self.install_for_development()
        self.warn_deprecated_options()
    def initialize_options(self):
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.' # always copy eggs installed in curdir
    def finalize_options(self):
        # Derive egg name/paths from the egg_info command and validate that
        # --egg-path (if given) points back at the source tree.
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            raise DistutilsError(
            "Please rename %r to %r before using 'develop'"
            % (ei.egg_info, ei.broken_egg_info)
            )
        self.args = [ei.egg_name]
        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))
        self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link')
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)
        target = normalize_path(self.egg_base)
        if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to "+target
            )
        # Make a distribution for the package's source
        self.dist = Distribution(
            target,
            PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name = ei.egg_name
        )
        # Compute the relative path from the install dir back to setup.py
        # (written as the second line of the .egg-link file).
        p = self.egg_base.replace(os.sep,'/')
        if p!= os.curdir:
            p = '../' * (p.count('/')+1)
        self.setup_path = p
        p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
        if p != normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory", p, normalize_path(os.curdir))
    def install_for_development(self):
        if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:
            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)
            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')
            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
            # Fixup egg-link and easy-install.pth
            ei_cmd = self.get_finalized_command("egg_info")
            self.egg_path = build_path
            self.dist.location = build_path
            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info) # XXX
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')
            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')
        self.install_site_py() # ensure that target dir is site-safe
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None
        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            f = open(self.egg_link,"w")
            f.write(self.egg_path + "\n" + self.setup_path)
            f.close()
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)
    def uninstall_link(self):
        # Remove the .egg-link only if its contents still match what this
        # checkout would have written; otherwise leave it alone.
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist) # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")
    def install_egg_scripts(self, dist):
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self,dist)
        # create wrapper scripts in the script dir, pointing to dist.scripts
        # new-style...
        self.install_wrapper_scripts(dist)
        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            f = open(script_path,'rU')
            script_text = f.read()
            f.close()
            self.install_script(dist, script_name, script_text, script_path)
| mit |
NischalLal/Humpty-Dumpty-SriGanesh | myblog/migrations/0002_contact_project_socialsite.py | 1 | 1637 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-13 18:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Contact, Project and SocialSite
    tables to the myblog app (on top of 0001_initial)."""
    dependencies = [
        ('myblog', '0001_initial'),
    ]
    operations = [
        # Contact: a stored reply plus the visitor-submitted message.
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('message_from_me', models.TextField()),
                ('subject', models.CharField(max_length=33)),
                ('message_from_user', models.TextField()),
            ],
        ),
        # Project: portfolio entry with link, image and description.
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('link', models.URLField()),
                ('image', models.ImageField(default=None, upload_to='myblog/image/project')),
                ('detail', models.TextField()),
                ('created_on', models.DateTimeField()),
            ],
        ),
        # SocialSite: a named external profile link.
        migrations.CreateModel(
            name='SocialSite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('site_name', models.CharField(max_length=10)),
                ('link', models.URLField()),
            ],
            options={
                'verbose_name_plural': 'Social Sites',
            },
        ),
    ]
| bsd-3-clause |
ccwang002/biocloud-server-kai | src/rna_seq/migrations/0003_rnaseqexedetail.py | 1 | 1617 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-03 09:52
from __future__ import unicode_literals
import analyses.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds RNASeqExeDetail, a one-to-one
    extension of RNASeqModel tracking per-stage execution status."""
    dependencies = [
        ('rna_seq', '0002_auto_20160702_1000'),
    ]
    operations = [
        migrations.CreateModel(
            name='RNASeqExeDetail',
            fields=[
                # The parent link doubles as the primary key (multi-table
                # one-to-one onto rna_seq.RNASeqModel).
                ('analysis', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='execution_detail', serialize=False, to='rna_seq.RNASeqModel')),
                # One status field per pipeline stage; all share the same
                # choices and default to 'WAITING'.
                ('stage_qc', analyses.fields.StageStatusField(choices=[('WAITING', 'Waiting'), ('RUNNING', 'Running'), ('SUCCESSFUL', 'Successful'), ('FAILED', 'Failed'), ('SKIPED', 'Skiped')], default='WAITING', max_length=32)),
                ('stage_alignment', analyses.fields.StageStatusField(choices=[('WAITING', 'Waiting'), ('RUNNING', 'Running'), ('SUCCESSFUL', 'Successful'), ('FAILED', 'Failed'), ('SKIPED', 'Skiped')], default='WAITING', max_length=32)),
                ('stage_cufflinks', analyses.fields.StageStatusField(choices=[('WAITING', 'Waiting'), ('RUNNING', 'Running'), ('SUCCESSFUL', 'Successful'), ('FAILED', 'Failed'), ('SKIPED', 'Skiped')], default='WAITING', max_length=32)),
                ('stage_cuffdiff', analyses.fields.StageStatusField(choices=[('WAITING', 'Waiting'), ('RUNNING', 'Running'), ('SUCCESSFUL', 'Successful'), ('FAILED', 'Failed'), ('SKIPED', 'Skiped')], default='WAITING', max_length=32)),
            ],
        ),
    ]
| mit |
open-infinity/health-monitoring | nodechecker/nodechecker/lib/python/nodechecker/config.py | 1 | 2856 | #!/usr/bin/env python2
import ConfigParser
class Config(object):
    """Typed view over the nodechecker ini configuration file.

    Each known (section, option) pair of the ini file is exposed as an
    attribute named ``<section>_<option>`` holding the raw string value,
    e.g. ``node_udp_port`` for option ``udp_port`` in section ``node``.
    """

    # (section, options) pairs, in the exact order the options are read;
    # attribute names are derived as '<section>_<option>'.
    _SCHEMA = (
        ('hm', ('root',)),
        ('node', ('mode', 'log_level', 'log_file', 'udp_port',
                  'ip_address', 'ip_address_public', 'instance_id',
                  'cluster_id', 'machine_id', 'cloud_zone',
                  'heartbeat_period', 'rrd_scan_period',
                  'master_timeout', 'dead_node_timeout')),
        ('nodechecker', ('home',)),
        ('notifications', ('log_file', 'log_file_max_bytes',
                           'log_file_backup_count', 'home_dir',
                           'inbox_dir', 'sent_dir', 'dead_node_string')),
        ('email', ('enabled', 'subject', 'from', 'to', 'smtp_server',
                   'smtp_port', 'smtp_username', 'smtp_password')),
        ('snmp', ('enabled', 'community_string', 'manager')),
        ('collectd', ('home', 'rrd_dir')),
        ('rrd_http_server', ('home',)),
        ('pound', ('home',)),
        ('test', ('dump_dir',)),
    )

    def __init__(self, conf_file):
        """Parse `conf_file` and set one attribute per known option.

        Raises the usual ConfigParser errors (NoSectionError,
        NoOptionError) when an expected entry is missing.
        """
        parser = ConfigParser.SafeConfigParser()
        parser.read(conf_file)
        for section, options in self._SCHEMA:
            for option in options:
                setattr(self, '%s_%s' % (section, option),
                        parser.get(section, option))
| apache-2.0 |
maxive/erp | addons/project_timesheet_holidays/models/hr_holidays.py | 1 | 4228 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
class HolidaysType(models.Model):
    """Extends leave types with automatic timesheet generation settings.

    When `timesheet_generate` is enabled, validated leaves create analytic
    lines in the configured internal project/task (see hr.leave below).
    """
    _inherit = "hr.leave.type"

    def _default_project_id(self):
        """Default internal project: the company's leave-timesheet project."""
        company = self.company_id if self.company_id else self.env.user.company_id
        return company.leave_timesheet_project_id.id

    def _default_task_id(self):
        """Default internal task: the company's leave-timesheet task."""
        company = self.company_id if self.company_id else self.env.user.company_id
        return company.leave_timesheet_task_id.id

    timesheet_generate = fields.Boolean('Generate Timesheet', default=True, help="If checked, when validating a leave, timesheet will be generated in the Vacation Project of the company.")
    timesheet_project_id = fields.Many2one('project.project', string="Internal Project", default=_default_project_id, help="The project will contain the timesheet generated when a leave is validated.")
    timesheet_task_id = fields.Many2one('project.task', string="Internal Task for timesheet", default=_default_task_id, domain="[('project_id', '=', timesheet_project_id)]")

    @api.onchange('timesheet_generate')
    def _onchange_timesheet_generate(self):
        """Keep project/task in sync with the generate-timesheet flag:
        fill them from the company defaults when enabled, clear otherwise."""
        if self.timesheet_generate:
            company = self.company_id if self.company_id else self.env.user.company_id
            self.timesheet_project_id = company.leave_timesheet_project_id
            self.timesheet_task_id = company.leave_timesheet_task_id
        else:
            self.timesheet_project_id = False
            self.timesheet_task_id = False

    @api.constrains('timesheet_generate')
    def _check_timesheet_generate(self):
        """Require both internal project and task whenever generation is on."""
        for holiday_status in self:
            if holiday_status.timesheet_generate:
                if not holiday_status.timesheet_project_id or not holiday_status.timesheet_task_id:
                    # Fixed typo in the user-facing message: "requried" -> "required".
                    raise ValidationError(_('For the leaves to generate timesheet, the internal project and task are required.'))
class Holidays(models.Model):
    """Extends hr.leave to create/remove timesheet lines on validation/refusal."""
    _inherit = "hr.leave"
    # Analytic lines generated for this leave (inverse of holiday_id).
    timesheet_ids = fields.One2many('account.analytic.line', 'holiday_id', string="Analytic Lines")
    def _validate_leave_request(self):
        """ Timesheet will be generated on leave validation only if a timesheet_project_id and a
        timesheet_task_id are set on the corresponding leave type. The generated timesheet will
        be attached to this project/task.
        """
        # create the timesheet on the vacation project
        for holiday in self.filtered(
                lambda request: request.holiday_type == 'employee' and
                request.holiday_status_id.timesheet_project_id and
                request.holiday_status_id.timesheet_task_id):
            holiday_project = holiday.holiday_status_id.timesheet_project_id
            holiday_task = holiday.holiday_status_id.timesheet_task_id
            # One (date, hours) entry per worked day in the leave interval.
            work_hours_data = [item for item in holiday.employee_id.iter_work_hours_count(fields.Datetime.from_string(holiday.date_from), fields.Datetime.from_string(holiday.date_to))]
            for index, (day_date, work_hours_count) in enumerate(work_hours_data):
                # One analytic line per day, named "leave (i/n)".
                self.env['account.analytic.line'].create({
                    'name': "%s (%s/%s)" % (holiday.name or '', index + 1, len(work_hours_data)),
                    'project_id': holiday_project.id,
                    'task_id': holiday_task.id,
                    'account_id': holiday_project.analytic_account_id.id,
                    'unit_amount': work_hours_count,
                    'user_id': holiday.employee_id.user_id.id,
                    'date': fields.Date.to_string(day_date),
                    'holiday_id': holiday.id,
                    'employee_id': holiday.employee_id.id,
                })
        return super(Holidays, self)._validate_leave_request()
    @api.multi
    def action_refuse(self):
        """ Remove the timesheets linked to the refused holidays """
        result = super(Holidays, self).action_refuse()
        timesheets = self.sudo().mapped('timesheet_ids')
        # Detach from the leave before deleting the lines.
        timesheets.write({'holiday_id': False})
        timesheets.unlink()
        return result
| agpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/numpy/lib/_iotools.py | 53 | 31491 | """A collection of functions designed to help I/O with ascii files.
"""
from __future__ import division, absolute_import, print_function
__docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
from numpy.compat import asbytes, bytes, asbytes_nested, basestring
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
if sys.version_info[0] >= 3:
    def _bytes_to_complex(s):
        # Python 3: complex() rejects bytes, so ascii-decode first.
        return complex(s.decode('ascii'))
    def _bytes_to_name(s):
        # Python 3: field names must be str, not bytes.
        return s.decode('ascii')
else:
    # Python 2: byte strings are native strings; the builtins work as-is.
    _bytes_to_complex = complex
    _bytes_to_name = str
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _is_bytes_like(obj):
    """
    Check whether obj behaves like a bytes object.

    Duck-typed: the test is whether concatenation with an empty byte
    string succeeds.
    """
    try:
        obj + asbytes('')
    except (TypeError, ValueError):
        return False
    else:
        return True
def _to_filehandle(fname, flag='r', return_opened=False):
    """
    Returns the filehandle corresponding to a string or a file.
    If the string ends in '.gz', the file is automatically unzipped.

    Parameters
    ----------
    fname : string, filehandle
        Name of the file whose filehandle must be returned.
    flag : string, optional
        Flag indicating the status of the file ('r' for read, 'w' for write).
    return_opened : boolean, optional
        Whether to return the opening status of the file.
    """
    if _is_string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fhd = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            import bz2
            # NOTE(review): `flag` is ignored for .bz2, so these files are
            # always opened in BZ2File's default mode.
            fhd = bz2.BZ2File(fname)
        else:
            # Bug fix: `file()` is a Python-2-only builtin and raises
            # NameError on Python 3 (which this module explicitly
            # supports); `open()` is the portable equivalent on both.
            fhd = open(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Anything with a `seek` method is treated as an already-open handle.
        fhd = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fhd, opened
    return fhd
def has_nested_fields(ndtype):
    """
    Returns whether one or several fields of a dtype are nested.

    Parameters
    ----------
    ndtype : dtype
        Data-type of a structured array.

    Raises
    ------
    AttributeError
        If `ndtype` does not have a `names` attribute.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
    >>> np.lib._iotools.has_nested_fields(dt)
    False
    """
    # A field is nested when its own dtype is structured, i.e. has a
    # non-None `names`.  Plain dtypes have `names is None`, hence the
    # `or ()` fallback for the outer iteration.
    return any(ndtype[field].names for field in ndtype.names or ())
def flatten_dtype(ndtype, flatten_base=False):
    """
    Unpack a structured data-type by collapsing nested fields and/or fields
    with a shape.

    Note that the field names are lost.

    Parameters
    ----------
    ndtype : dtype
        The datatype to collapse
    flatten_base : {False, True}, optional
        Whether to transform a field with a shape into several fields or not.

    Examples
    --------
    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
    ...                ('block', int, (2, 3))])
    >>> np.lib._iotools.flatten_dtype(dt)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
    [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),
     dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),
     dtype('int32')]
    """
    if ndtype.names is None:
        # Leaf dtype: emit its base type, repeated once per element when a
        # shaped (sub-array) field is being expanded.
        count = int(np.prod(ndtype.shape)) if flatten_base else 1
        return [ndtype.base] * count
    # Structured dtype: recurse into each field, in declaration order.
    flat = []
    for fieldname in ndtype.names:
        flat.extend(flatten_dtype(ndtype.fields[fieldname][0], flatten_base))
    return flat
class LineSplitter(object):
    """
    Object to split a string at a given delimiter or at given places.

    Parameters
    ----------
    delimiter : str, int, or sequence of ints, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    comments : str, optional
        Character used to mark the beginning of a comment. Default is '#'.
    autostrip : bool, optional
        Whether to strip each individual field. Default is True.

    Notes
    -----
    Lines are expected as byte strings; all internal comparisons use
    byte-string literals (via `asbytes`) for Python 2/3 compatibility.
    """
    def autostrip(self, method):
        """
        Wrapper to strip each member of the output of `method`.

        Parameters
        ----------
        method : function
            Function that takes a single argument and returns a sequence of
            strings.

        Returns
        -------
        wrapped : function
            The result of wrapping `method`. `wrapped` takes a single input
            argument and returns a list of strings that are stripped of
            white-space.
        """
        return lambda input: [_.strip() for _ in method(input)]
    #
    def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
        # Select the splitting strategy from the type of `delimiter`:
        # bytes/None -> split on character, sequence of ints -> variable
        # widths, single int -> fixed width.
        self.comments = comments
        # Delimiter is a character
        if isinstance(delimiter, unicode):
            delimiter = delimiter.encode('ascii')
        if (delimiter is None) or _is_bytes_like(delimiter):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #
    def _delimited_splitter(self, line):
        # Drop the comment part, strip surrounding whitespace, then split
        # on the delimiter (None means any run of whitespace).
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(asbytes(" \r\n"))
        if not line:
            return []
        return line.split(self.delimiter)
    #
    def _fixedwidth_splitter(self, line):
        # Cut the (comment-stripped) line into consecutive fixed-size chunks.
        if self.comments is not None:
            line = line.split(self.comments)[0]
        line = line.strip(asbytes("\r\n"))
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
        return [line[s] for s in slices]
    #
    def _variablewidth_splitter(self, line):
        # Apply the precomputed slices (one per declared field width).
        if self.comments is not None:
            line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #
    def __call__(self, line):
        return self._handyman(line)
class NameValidator(object):
    """
    Object to validate a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by '_'. During instantiation, the user can define a list
    of names to exclude, as well as a list of invalid characters. Names in
    the exclusion list are appended a '_' character.

    Once an instance has been created, it can be called with a list of
    names, and a list of valid names will be created. The `__call__`
    method accepts an optional keyword "default" that sets the default name
    in case of ambiguity. By default this is 'f', so that names will
    default to `f0`, `f1`, etc.

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default
        list ['return', 'file', 'print']. Excluded names are appended an
        underscore: for example, `file` becomes `file_` if supplied.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        * If True, field names are case-sensitive.
        * If False or 'upper', field names are converted to upper case.
        * If 'lower', field names are converted to lower case.

        The default value is True.
    replace_space : '_', optional
        Character(s) used in replacement of white spaces.

    Notes
    -----
    Calling an instance of `NameValidator` is the same as calling its
    method `validate`.

    Examples
    --------
    >>> validator = np.lib._iotools.NameValidator()
    >>> validator(['file', 'field2', 'with space', 'CaSe'])
    ['file_', 'field2', 'with_space', 'CaSe']
    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
                                                  deletechars='q',
                                                  case_sensitive='False')
    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
    ['excl_', 'field2', 'no_', 'with_space', 'case']
    """
    #
    defaultexcludelist = ['return', 'file', 'print']
    defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #
    def __init__(self, excludelist=None, deletechars=None,
                 case_sensitive=None, replace_space='_'):
        # Process the exclusion list ..
        if excludelist is None:
            excludelist = []
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # Process the list of characters to delete
        if deletechars is None:
            delete = self.defaultdeletechars
        else:
            delete = set(deletechars)
        # '"' is always invalid, whatever the user supplied.
        delete.add('"')
        self.deletechars = delete
        # Process the case option .....
        # The converter is stored as a callable so validate() does not
        # have to re-test the option on every name.
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or case_sensitive.startswith('u'):
            self.case_converter = lambda x: x.upper()
        elif case_sensitive.startswith('l'):
            self.case_converter = lambda x: x.lower()
        else:
            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
            raise ValueError(msg)
        #
        self.replace_space = replace_space
    def validate(self, names, defaultfmt="f%i", nbfields=None):
        """
        Validate a list of strings as field names for a structured array.

        Parameters
        ----------
        names : sequence of str
            Strings to be validated.
        defaultfmt : str, optional
            Default format string, used if validating a given string
            reduces its length to zero.
        nbfields : integer, optional
            Final number of validated names, used to expand or shrink the
            initial list of names.

        Returns
        -------
        validatednames : list of str
            The list of validated field names.

        Notes
        -----
        A `NameValidator` instance can be called directly, which is the
        same as calling `validate`. For examples, see `NameValidator`.
        """
        # Initial checks ..............
        if (names is None):
            if (nbfields is None):
                return None
            names = []
        if isinstance(names, basestring):
            names = [names, ]
        if nbfields is not None:
            # Pad with empty names or truncate to exactly nbfields entries.
            nbnames = len(names)
            if (nbnames < nbfields):
                names = list(names) + [''] * (nbfields - nbnames)
            elif (nbnames > nbfields):
                names = names[:nbfields]
        # Set some shortcuts ...........
        deletechars = self.deletechars
        excludelist = self.excludelist
        case_converter = self.case_converter
        replace_space = self.replace_space
        # Initializes some variables ...
        validatednames = []
        # `seen` counts occurrences so duplicates get a '_<n>' suffix.
        seen = dict()
        nbempty = 0
        #
        for item in names:
            item = case_converter(item).strip()
            if replace_space:
                item = item.replace(' ', replace_space)
            item = ''.join([c for c in item if c not in deletechars])
            if item == '':
                # Name reduced to nothing: synthesize one from defaultfmt,
                # skipping over values already present in the input.
                item = defaultfmt % nbempty
                while item in names:
                    nbempty += 1
                    item = defaultfmt % nbempty
                nbempty += 1
            elif item in excludelist:
                item += '_'
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt + 1
        return tuple(validatednames)
    #
    def __call__(self, names, defaultfmt="f%i", nbfields=None):
        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
def str2bool(value):
    """
    Tries to transform a string supposed to represent a boolean to a boolean.

    Parameters
    ----------
    value : str
        The string that is transformed to a boolean.

    Returns
    -------
    boolval : bool
        The boolean representation of `value`.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)

    Examples
    --------
    >>> np.lib._iotools.str2bool('TRUE')
    True
    >>> np.lib._iotools.str2bool('false')
    False
    """
    # Upper-case once so any casing of 'true'/'false' is accepted.
    normalized = value.upper()
    if normalized == asbytes('FALSE'):
        return False
    if normalized == asbytes('TRUE'):
        return True
    raise ValueError("Invalid boolean")
class ConverterError(Exception):
    """
    Exception raised when an error occurs in a converter for string values.
    """
    # Root of the converter exception hierarchy in this module.
    pass
class ConverterLockError(ConverterError):
    """
    Exception raised when an attempt is made to upgrade a locked converter.
    """
    # See StringConverter's `locked` parameter, which is documented as
    # preventing automatic upgrades.
    pass
class ConversionWarning(UserWarning):
    """
    Warning issued when a string converter has a problem.

    Notes
    -----
    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
    is explicitly suppressed with the "invalid_raise" keyword.
    """
    pass
class StringConverter(object):
"""
Factory class for function transforming a string into another object
(int, float).
After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a
missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
Default value to return when the input corresponds to a missing
value.
type : type
Type of the output.
_status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
Sequence of tuples (dtype, function, default value) to evaluate in
order.
_locked : bool
Holds `locked` parameter.
Parameters
----------
dtype_or_func : {None, dtype, function}, optional
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
default value to `np.nan`. If a function, this function is used to
convert a string to another object. In this case, it is recommended
to give an associated default value as input.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given, `StringConverter`
tries to supply a reasonable default value.
missing_values : sequence of str, optional
Sequence of strings indicating a missing value.
locked : bool, optional
Whether the StringConverter should be locked to prevent automatic
upgrade or not. Default is False.
"""
#
_mapper = [(nx.bool_, str2bool, False),
(nx.integer, int, -1)]
# On 32-bit systems, we need to make sure that we explicitly include
# nx.int64 since ns.integer is nx.int32.
if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.floating, float, nx.nan),
(complex, _bytes_to_complex, nx.nan + 0j),
(nx.string_, bytes, asbytes('???'))])
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
#
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
#
# This is a bit annoying. We want to return the "general" type in most
# cases (ie. "string" rather than "S10"), but we want to return the
# specific type for datetime64 (ie. "datetime64[us]" rather than
# "datetime64").
@classmethod
def _dtypeortype(cls, dtype):
    """Return the dtype itself for datetime64, its scalar type otherwise.

    datetime64 keeps the full dtype so the unit (e.g. "datetime64[us]")
    is preserved; every other kind collapses to the general scalar type.
    """
    if dtype.type != np.datetime64:
        return dtype.type
    return dtype
#
@classmethod
def upgrade_mapper(cls, func, default=None):
    """
    Upgrade the mapper of a StringConverter by adding a new function and
    its corresponding default.

    The input function (or sequence of functions) and its associated
    default value (if any) is inserted in penultimate position of the
    mapper. The corresponding type is estimated from the dtype of the
    default value.

    Parameters
    ----------
    func : var
        Function, or sequence of functions

    Examples
    --------
    >>> import dateutil.parser
    >>> import datetime
    >>> dateparser = dateutil.parser.parse
    >>> defaultdate = datetime.date(2000, 1, 1)
    >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
    """
    # Func is a single function
    if hasattr(func, '__call__'):
        cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
        return
    elif hasattr(func, '__iter__'):
        # A sequence of ready-made (dtype, func, default) tuples is
        # inserted verbatim.
        if isinstance(func[0], (tuple, list)):
            for _ in func:
                cls._mapper.insert(-1, _)
            return
        # A sequence of functions: build a matching list of defaults,
        # padding with None so it lines up with `func`.
        if default is None:
            default = [None] * len(func)
        else:
            default = list(default)
            # BUGFIX: was `default.append([None] * ...)`, which appended
            # the padding list as a single element and misaligned the
            # zip() below; extend the list instead.
            default += [None] * (len(func) - len(default))
        for (fct, dft) in zip(func, default):
            cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
#
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
             locked=False):
    """Build the converter from a dtype or a conversion function.

    See the class docstring for the meaning of the parameters.
    """
    # Convert unicode (for Py3)
    if isinstance(missing_values, unicode):
        missing_values = asbytes(missing_values)
    elif isinstance(missing_values, (list, tuple)):
        missing_values = asbytes_nested(missing_values)
    # Defines a lock for upgrade
    self._locked = bool(locked)
    # No input dtype: minimal initialization (bool converter)
    if dtype_or_func is None:
        self.func = str2bool
        self._status = 0
        self.default = default or False
        dtype = np.dtype('bool')
    else:
        # Is the input a np.dtype ?
        try:
            self.func = None
            dtype = np.dtype(dtype_or_func)
        except TypeError:
            # dtype_or_func must be a function, then
            if not hasattr(dtype_or_func, '__call__'):
                errmsg = ("The input argument `dtype` is neither a"
                          " function nor a dtype (got '%s' instead)")
                raise TypeError(errmsg % type(dtype_or_func))
            # Set the function
            self.func = dtype_or_func
            # If we don't have a default, try to guess it or set it to
            # None
            if default is None:
                try:
                    default = self.func(asbytes('0'))
                except ValueError:
                    default = None
            dtype = self._getdtype(default)
        # Set the status according to the dtype: find the first mapper
        # entry whose type the input dtype is a sub-dtype of.
        _status = -1
        for (i, (deftype, func, default_def)) in enumerate(self._mapper):
            if np.issubdtype(dtype.type, deftype):
                _status = i
                if default is None:
                    self.default = default_def
                else:
                    self.default = default
                break
        if _status == -1:
            # We never found a match in the _mapper...
            _status = 0
            self.default = default
        self._status = _status
        # If the input was a dtype, set the function to the last we saw
        # in the loop above.
        if self.func is None:
            self.func = func
        # If the status is 1 (int), change the function to
        # something more robust (plain int() rejects e.g. "1.0").
        if self.func == self._mapper[1][1]:
            if issubclass(dtype.type, np.uint64):
                self.func = np.uint64
            elif issubclass(dtype.type, np.int64):
                self.func = np.int64
            else:
                self.func = lambda x: int(float(x))
    # Store the list of strings corresponding to missing values.
    # The empty string is always considered missing.
    if missing_values is None:
        self.missing_values = set([asbytes('')])
    else:
        if isinstance(missing_values, bytes):
            missing_values = missing_values.split(asbytes(","))
        self.missing_values = set(list(missing_values) + [asbytes('')])
    #
    self._callingfunction = self._strict_call
    self.type = self._dtypeortype(dtype)
    self._checked = False
    self._initial_default = default
#
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
#
def _strict_call(self, value):
    """Convert `value`; raise ValueError unless it is a missing value."""
    try:
        # We check if we can convert the value using the current function
        new_value = self.func(value)
        # In addition to having to check whether func can convert the
        # value, we also have to make sure that we don't get overflow
        # errors for integers.
        if self.func is int:
            try:
                np.array(value, dtype=self.type)
            except OverflowError:
                raise ValueError
        # We're still here so we can now return the new value
        return new_value
    except ValueError:
        if value.strip() in self.missing_values:
            # missing value: return the default instead of raising
            if not self._status:
                self._checked = False
            return self.default
        raise ValueError("Cannot convert string '%s'" % value)
#
def __call__(self, value):
    # Dispatch to whichever of _strict_call / _loose_call is currently
    # selected in self._callingfunction.
    return self._callingfunction(value)
#
def upgrade(self, value):
    """
    Find the best converter for a given string, and return the result.

    The supplied string `value` is converted by testing different
    converters in order. First the `func` method of the
    `StringConverter` instance is tried, if this fails other available
    converters are tried. The order in which these other converters
    are tried is determined by the `_status` attribute of the instance.

    Parameters
    ----------
    value : str
        The string to convert.

    Returns
    -------
    out : any
        The result of converting `value` with the appropriate converter.

    Raises
    ------
    ConverterLockError
        If the converter is locked and cannot be upgraded.
    ConverterError
        If no conversion function in the mapper can handle `value`.
    """
    self._checked = True
    try:
        return self._strict_call(value)
    except ValueError:
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complains if we try to upgrade by the maximum
        _status = self._status
        if _status == _statusmax:
            errmsg = "Could not find a valid conversion function"
            raise ConverterError(errmsg)
        elif _status < _statusmax - 1:
            _status += 1
        # Step to the next (more general) converter in the mapper and
        # retry recursively until one succeeds or we run out.
        (self.type, self.func, default) = self._mapper[_status]
        self._status = _status
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
        return self.upgrade(value)
def iterupgrade(self, value):
    """Upgrade the converter until it handles every string in `value`.

    Like `upgrade`, but `value` may be an iterable of strings; the
    converter is stepped through the mapper until `_strict_call`
    succeeds on all of them.
    """
    self._checked = True
    if not hasattr(value, '__iter__'):
        value = (value,)
    _strict_call = self._strict_call
    try:
        for _m in value:
            _strict_call(_m)
    except ValueError:
        # Raise an exception if we locked the converter...
        if self._locked:
            errmsg = "Converter is locked and cannot be upgraded"
            raise ConverterLockError(errmsg)
        _statusmax = len(self._mapper)
        # Complains if we try to upgrade by the maximum
        _status = self._status
        if _status == _statusmax:
            raise ConverterError(
                "Could not find a valid conversion function"
            )
        elif _status < _statusmax - 1:
            _status += 1
        # Step to the next converter and retry on the whole sequence.
        (self.type, self.func, default) = self._mapper[_status]
        if self._initial_default is not None:
            self.default = self._initial_default
        else:
            self.default = default
        self._status = _status
        self.iterupgrade(value)
def update(self, func, default=None, testing_value=None,
           missing_values=asbytes(''), locked=False):
    """
    Set StringConverter attributes directly.

    Parameters
    ----------
    func : function
        Conversion function.
    default : any, optional
        Value to return by default, that is, when the string to be
        converted is flagged as missing. If not given,
        `StringConverter` tries to supply a reasonable default value.
    testing_value : str, optional
        A string representing a standard input value of the converter.
        This string is used to help defining a reasonable default
        value.
    missing_values : sequence of str, optional
        Sequence of strings indicating a missing value.
    locked : bool, optional
        Whether the StringConverter should be locked to prevent
        automatic upgrade or not. Default is False.

    Notes
    -----
    `update` takes the same parameters as the constructor of
    `StringConverter`, except that `func` does not accept a `dtype`
    whereas `dtype_or_func` in the constructor does.
    """
    self.func = func
    self._locked = locked
    # Don't reset the default to None if we can avoid it
    if default is not None:
        self.default = default
        self.type = self._dtypeortype(self._getdtype(default))
    else:
        # Guess the output type by running func on a test value.
        try:
            tester = func(testing_value or asbytes('1'))
        except (TypeError, ValueError):
            tester = None
        self.type = self._dtypeortype(self._getdtype(tester))
    # Add the missing values to the existing set
    if missing_values is not None:
        if _is_bytes_like(missing_values):
            self.missing_values.add(missing_values)
        elif hasattr(missing_values, '__iter__'):
            for val in missing_values:
                self.missing_values.add(val)
    else:
        # BUGFIX: was `[]`, which would break the `.add()` calls above
        # on any subsequent update; missing_values is always a set.
        self.missing_values = set()
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
    """
    Convenience function to create a `np.dtype` object.

    The function processes the input `dtype` and matches it with the given
    names.

    Parameters
    ----------
    ndtype : var
        Definition of the dtype. Can be any string or dictionary recognized
        by the `np.dtype` function, or a sequence of types.
    names : str or sequence, optional
        Sequence of strings to use as field names for a structured dtype.
        For convenience, `names` can be a string of a comma-separated list
        of names.
    defaultfmt : str, optional
        Format string used to define missing names, such as ``"f%i"``
        (default) or ``"fields_%02i"``.
    validationargs : optional
        A series of optional arguments used to initialize a
        `NameValidator`.

    Examples
    --------
    >>> np.lib._iotools.easy_dtype(float)
    dtype('float64')
    >>> np.lib._iotools.easy_dtype("i4, f8")
    dtype([('f0', '<i4'), ('f1', '<f8')])
    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
    dtype([('field_000', '<i4'), ('field_001', '<f8')])
    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    """
    try:
        ndtype = np.dtype(ndtype)
    except TypeError:
        # np.dtype did not recognize the input: treat it as a sequence of
        # types and build a structured dtype from it.
        validate = NameValidator(**validationargs)
        nbfields = len(ndtype)
        if names is None:
            names = [''] * len(ndtype)
        elif isinstance(names, basestring):
            names = names.split(",")
        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
        ndtype = np.dtype(dict(formats=ndtype, names=names))
    else:
        nbtypes = len(ndtype)
        # Explicit names
        if names is not None:
            validate = NameValidator(**validationargs)
            if isinstance(names, basestring):
                names = names.split(",")
            # Simple dtype: repeat to match the nb of names
            if nbtypes == 0:
                formats = tuple([ndtype.type] * len(names))
                names = validate(names, defaultfmt=defaultfmt)
                ndtype = np.dtype(list(zip(names, formats)))
            # Structured dtype: just validate the names as needed
            else:
                ndtype.names = validate(names, nbfields=nbtypes,
                                        defaultfmt=defaultfmt)
        # No implicit names
        elif (nbtypes > 0):
            validate = NameValidator(**validationargs)
            # Default initial names : should we change the format ?
            if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
                    (defaultfmt != "f%i")):
                ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
            # Explicit initial names : just validate
            else:
                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
    return ndtype
| gpl-2.0 |
fengbaicanhe/intellij-community | python/lib/Lib/encodings/cp856.py | 593 | 12679 | """ Python Character Mapping Codec cp856 generated from 'MAPPINGS/VENDORS/MISC/CP856.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP856 codec built on the module-level charmap tables."""

    def encode(self, input, errors='strict'):
        """Encode `input` (unicode) to CP856 bytes."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode CP856 bytes `input` to unicode."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP856 encoder (the charmap codec keeps no state)."""

    def encode(self, input, final=False):
        data, consumed = codecs.charmap_encode(input, self.errors,
                                               encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP856 decoder (the charmap codec keeps no state)."""

    def decode(self, input, final=False):
        text, consumed = codecs.charmap_decode(input, self.errors,
                                               decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # encode() comes from Codec, stream handling from codecs.StreamWriter
    pass
class StreamReader(Codec,codecs.StreamReader):
    # decode() comes from Codec, stream handling from codecs.StreamReader
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for cp856."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp856',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u05d0' # 0x80 -> HEBREW LETTER ALEF
u'\u05d1' # 0x81 -> HEBREW LETTER BET
u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL
u'\u05d3' # 0x83 -> HEBREW LETTER DALET
u'\u05d4' # 0x84 -> HEBREW LETTER HE
u'\u05d5' # 0x85 -> HEBREW LETTER VAV
u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN
u'\u05d7' # 0x87 -> HEBREW LETTER HET
u'\u05d8' # 0x88 -> HEBREW LETTER TET
u'\u05d9' # 0x89 -> HEBREW LETTER YOD
u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF
u'\u05db' # 0x8B -> HEBREW LETTER KAF
u'\u05dc' # 0x8C -> HEBREW LETTER LAMED
u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM
u'\u05de' # 0x8E -> HEBREW LETTER MEM
u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN
u'\u05e0' # 0x90 -> HEBREW LETTER NUN
u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH
u'\u05e2' # 0x92 -> HEBREW LETTER AYIN
u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE
u'\u05e4' # 0x94 -> HEBREW LETTER PE
u'\u05e5' # 0x95 -> HEBREW LETTER FINAL TSADI
u'\u05e6' # 0x96 -> HEBREW LETTER TSADI
u'\u05e7' # 0x97 -> HEBREW LETTER QOF
u'\u05e8' # 0x98 -> HEBREW LETTER RESH
u'\u05e9' # 0x99 -> HEBREW LETTER SHIN
u'\u05ea' # 0x9A -> HEBREW LETTER TAV
u'\ufffe' # 0x9B -> UNDEFINED
u'\xa3' # 0x9C -> POUND SIGN
u'\ufffe' # 0x9D -> UNDEFINED
u'\xd7' # 0x9E -> MULTIPLICATION SIGN
u'\ufffe' # 0x9F -> UNDEFINED
u'\ufffe' # 0xA0 -> UNDEFINED
u'\ufffe' # 0xA1 -> UNDEFINED
u'\ufffe' # 0xA2 -> UNDEFINED
u'\ufffe' # 0xA3 -> UNDEFINED
u'\ufffe' # 0xA4 -> UNDEFINED
u'\ufffe' # 0xA5 -> UNDEFINED
u'\ufffe' # 0xA6 -> UNDEFINED
u'\ufffe' # 0xA7 -> UNDEFINED
u'\ufffe' # 0xA8 -> UNDEFINED
u'\xae' # 0xA9 -> REGISTERED SIGN
u'\xac' # 0xAA -> NOT SIGN
u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF
u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER
u'\ufffe' # 0xAD -> UNDEFINED
u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0xB0 -> LIGHT SHADE
u'\u2592' # 0xB1 -> MEDIUM SHADE
u'\u2593' # 0xB2 -> DARK SHADE
u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\ufffe' # 0xB5 -> UNDEFINED
u'\ufffe' # 0xB6 -> UNDEFINED
u'\ufffe' # 0xB7 -> UNDEFINED
u'\xa9' # 0xB8 -> COPYRIGHT SIGN
u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0xBD -> CENT SIGN
u'\xa5' # 0xBE -> YEN SIGN
u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\ufffe' # 0xC6 -> UNDEFINED
u'\ufffe' # 0xC7 -> UNDEFINED
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0xCF -> CURRENCY SIGN
u'\ufffe' # 0xD0 -> UNDEFINED
u'\ufffe' # 0xD1 -> UNDEFINED
u'\ufffe' # 0xD2 -> UNDEFINED
u'\ufffe' # 0xD3 -> UNDEFINED
u'\ufffe' # 0xD4 -> UNDEFINED
u'\ufffe' # 0xD5 -> UNDEFINED
u'\ufffe' # 0xD6 -> UNDEFINED
u'\ufffe' # 0xD7 -> UNDEFINED
u'\ufffe' # 0xD8 -> UNDEFINED
u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0xDB -> FULL BLOCK
u'\u2584' # 0xDC -> LOWER HALF BLOCK
u'\xa6' # 0xDD -> BROKEN BAR
u'\ufffe' # 0xDE -> UNDEFINED
u'\u2580' # 0xDF -> UPPER HALF BLOCK
u'\ufffe' # 0xE0 -> UNDEFINED
u'\ufffe' # 0xE1 -> UNDEFINED
u'\ufffe' # 0xE2 -> UNDEFINED
u'\ufffe' # 0xE3 -> UNDEFINED
u'\ufffe' # 0xE4 -> UNDEFINED
u'\ufffe' # 0xE5 -> UNDEFINED
u'\xb5' # 0xE6 -> MICRO SIGN
u'\ufffe' # 0xE7 -> UNDEFINED
u'\ufffe' # 0xE8 -> UNDEFINED
u'\ufffe' # 0xE9 -> UNDEFINED
u'\ufffe' # 0xEA -> UNDEFINED
u'\ufffe' # 0xEB -> UNDEFINED
u'\ufffe' # 0xEC -> UNDEFINED
u'\ufffe' # 0xED -> UNDEFINED
u'\xaf' # 0xEE -> MACRON
u'\xb4' # 0xEF -> ACUTE ACCENT
u'\xad' # 0xF0 -> SOFT HYPHEN
u'\xb1' # 0xF1 -> PLUS-MINUS SIGN
u'\u2017' # 0xF2 -> DOUBLE LOW LINE
u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0xF4 -> PILCROW SIGN
u'\xa7' # 0xF5 -> SECTION SIGN
u'\xf7' # 0xF6 -> DIVISION SIGN
u'\xb8' # 0xF7 -> CEDILLA
u'\xb0' # 0xF8 -> DEGREE SIGN
u'\xa8' # 0xF9 -> DIAERESIS
u'\xb7' # 0xFA -> MIDDLE DOT
u'\xb9' # 0xFB -> SUPERSCRIPT ONE
u'\xb3' # 0xFC -> SUPERSCRIPT THREE
u'\xb2' # 0xFD -> SUPERSCRIPT TWO
u'\u25a0' # 0xFE -> BLACK SQUARE
u'\xa0' # 0xFF -> NO-BREAK SPACE
)
### Encoding table
# Build the inverse (unicode -> byte) map from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
marek-sezemsky/coreemu | daemon/core/session.py | 7 | 49204 | #
# CORE
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
session.py: defines the Session class used by the core-daemon daemon program
that manages a CORE session.
'''
import os, sys, tempfile, shutil, shlex, atexit, gc, pwd
import threading, time, random
from core.api import coreapi
if os.uname()[0] == "Linux":
from core.netns import nodes
from core.netns.vnet import GreTapBridge
elif os.uname()[0] == "FreeBSD":
from core.bsd import nodes
from core.emane import emane
from core.misc.utils import check_call, mutedetach, readfileintodict, \
filemunge, filedemunge
from core.conf import ConfigurableManager, Configurable
from core.location import CoreLocation
from core.service import CoreServices
from core.broker import CoreBroker
from core.mobility import MobilityManager
from core.sdt import Sdt
from core.misc.ipaddr import MacAddr
from core.misc.event import EventLoop
from core.constants import *
from core.misc.xmlsession import savesessionxml
from core.xen import xenconfig
class Session(object):
    ''' CORE session manager.
    '''
    # BUGFIX: the docstring above was previously placed *after* the
    # __sessions assignment, where it is just a no-op string expression,
    # leaving Session.__doc__ == None. A class docstring must be the
    # first statement in the class body.

    # sessions that get automatically shutdown when the process
    # terminates normally
    __sessions = set()
def __init__(self, sessionid = None, cfg = {}, server = None,
             persistent = False, mkdir = True):
    ''' Create a session: allocate a session id and directory, then
        instantiate the per-session manager objects.
    '''
    # NOTE(review): the mutable default cfg={} is shared across calls;
    # harmless as long as no caller mutates it -- confirm.
    if sessionid is None:
        # try to keep this short since it's used to construct
        # network interface names
        pid = os.getpid()
        sessionid = ((pid >> 16) ^
                     (pid & ((1 << 16) - 1)))
        sessionid ^= ((id(self) >> 16) ^ (id(self) & ((1 << 16) - 1)))
        sessionid &= 0xffff
    self.sessionid = sessionid
    self.sessiondir = os.path.join(tempfile.gettempdir(),
                                   "pycore.%s" % self.sessionid)
    if mkdir:
        os.mkdir(self.sessiondir)
    self.name = None
    self.filename = None
    self.thumbnail = None
    self.user = None
    self.node_count = None
    self._time = time.time()
    self.evq = EventLoop()
    # dict of objects: all nodes and nets
    self._objs = {}
    self._objslock = threading.Lock()
    # dict of configurable objects
    self._confobjs = {}
    self._confobjslock = threading.Lock()
    # connected request handlers (e.g. GUIs)
    self._handlers = set()
    self._handlerslock = threading.Lock()
    self._state = None
    # state hooks: files run via runhook(), callables via run_state_hooks()
    self._hooks = {}
    self._state_hooks = {}
    # dict of configuration items from /etc/core/core.conf config file
    self.cfg = cfg
    self.add_state_hook(coreapi.CORE_EVENT_RUNTIME_STATE,
                        self.runtime_state_hook)
    self.setstate(state=coreapi.CORE_EVENT_DEFINITION_STATE,
                  info=False, sendevent=False)
    self.server = server
    # non-persistent sessions are auto-shutdown at process exit
    if not persistent:
        self.addsession(self)
    self.master = False
    self.broker = CoreBroker(session=self, verbose=True)
    self.location = CoreLocation(self)
    self.mobility = MobilityManager(self)
    self.services = CoreServices(self)
    self.emane = emane.Emane(self)
    self.xen = xenconfig.XenConfigManager(self)
    self.sdt = Sdt(self)
    # future parameters set by the GUI may go here
    self.options = SessionConfig(self)
    self.metadata = SessionMetaData(self)
@classmethod
def addsession(cls, session):
    ''' Register a session for automatic shutdown when the process
        terminates normally (see atexit()).
    '''
    cls.__sessions.add(session)
@classmethod
def delsession(cls, session):
    ''' Unregister a session from automatic shutdown; ignore sessions
        that were never registered.
    '''
    # set.discard() is a no-op for missing members, matching the old
    # try/remove/except-KeyError behavior
    cls.__sessions.discard(session)
@classmethod
def atexit(cls):
    ''' Shut down any registered non-persistent sessions that are still
        up; intended to run at interpreter exit (registration with the
        atexit module is not visible in this chunk -- confirm).
    '''
    while cls.__sessions:
        s = cls.__sessions.pop()
        print >> sys.stderr, "WARNING: automatically shutting down " \
            "non-persistent session %s" % s.sessionid
        s.shutdown()
def __del__(self):
    ''' Best-effort cleanup on garbage collection. '''
    # note: there is no guarantee this will ever run
    self.shutdown()
def shutdown(self):
    ''' Shut down all emulation objects and remove the session directory.
    '''
    # shut down the optional managers, in the same fixed order
    for attrname in ('emane', 'broker', 'sdt'):
        manager = getattr(self, attrname, None)
        if manager is not None:
            manager.shutdown()
    self.delobjs()
    # keep the session directory only when preservedir is exactly '1'
    preserve = getattr(self.options, 'preservedir', None) == '1'
    if not preserve:
        shutil.rmtree(self.sessiondir, ignore_errors=True)
    if self.server:
        self.server.delsession(self)
    self.delsession(self)
def isconnected(self):
    ''' Return True if this session has at least one request handler.
    '''
    with self._handlerslock:
        return len(self._handlers) > 0
def connect(self, handler):
    ''' Associate a request handler with this session, marking it
        connected.
    '''
    # the master flag will only be set after a GUI has connected with the
    # handler, e.g. not during normal startup
    if handler.master is True:
        self.master = True
    with self._handlerslock:
        self._handlers.add(handler)
def disconnect(self, handler):
    ''' Disconnect a request handler from this session. Shutdown this
        session if there is no running emulation.
    '''
    with self._handlerslock:
        try:
            self._handlers.remove(handler)
        except KeyError:
            raise ValueError, \
                "Handler %s not associated with this session" % handler
        # count is captured under the lock; the shutdown decision runs
        # outside of it
        num_handlers = len(self._handlers)
    if num_handlers == 0:
        # shut down this session unless we are instantiating, running,
        # or collecting final data
        if self.getstate() < coreapi.CORE_EVENT_INSTANTIATION_STATE or \
                self.getstate() > coreapi.CORE_EVENT_DATACOLLECT_STATE:
            self.shutdown()
def broadcast(self, src, msg):
''' Send Node and Link CORE API messages to all handlers connected to this session.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
if isinstance(msg, coreapi.CoreNodeMessage) or \
isinstance(msg, coreapi.CoreLinkMessage):
try:
handler.sendall(msg.rawmsg)
except Exception, e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def broadcastraw(self, src, data):
''' Broadcast raw data to all handlers except src.
'''
self._handlerslock.acquire()
for handler in self._handlers:
if handler == src:
continue
try:
handler.sendall(data)
except Exception, e:
self.warn("sendall() error: %s" % e)
self._handlerslock.release()
def gethandler(self):
    ''' Return one of the connected handlers, preferring the master.
    '''
    with self._handlerslock:
        if not self._handlers:
            return None
        for candidate in self._handlers:
            if candidate.master:
                return candidate
        # no master connected: return an arbitrary handler
        return next(iter(self._handlers))
def setstate(self, state, info = False, sendevent = False,
             returnevent = False):
    ''' Set the session state. When info is true, log the state change
        event using the session handler's info method. When sendevent is
        true, generate a CORE API Event Message and send to the connected
        entity. When returnevent is also true, the Event Message is
        returned in the replies list instead of being broadcast.
    '''
    # no-op when the state is unchanged
    if state == self._state:
        return []
    self._time = time.time()
    self._state = state
    # run registered callable hooks first, then file hooks below
    self.run_state_hooks(state)
    replies = []
    if self.isconnected() and info:
        statename = coreapi.state_name(state)
        with self._handlerslock:
            for handler in self._handlers:
                handler.info("SESSION %s STATE %d: %s at %s" % \
                             (self.sessionid, state, statename,
                              time.ctime()))
    # record the state to the session dir and run file hooks
    self.writestate(state)
    self.runhook(state)
    if sendevent:
        tlvdata = ""
        tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
                                             state)
        msg = coreapi.CoreEventMessage.pack(0, tlvdata)
        # send Event Message to connected handlers (e.g. GUI)
        if self.isconnected():
            try:
                if returnevent:
                    replies.append(msg)
                else:
                    self.broadcastraw(None, msg)
            except Exception, e:
                self.warn("Error sending Event Message: %s" % e)
        # also inform slave servers
        tmp = self.broker.handlerawmsg(msg)
    return replies
def getstate(self):
    ''' Retrieve the current state of the session.
    '''
    # one of the coreapi.CORE_EVENT_*_STATE values (see setstate)
    return self._state
def writestate(self, state):
''' Write the current state to a state file in the session dir.
'''
try:
f = open(os.path.join(self.sessiondir, "state"), "w")
f.write("%d %s\n" % (state, coreapi.state_name(state)))
f.close()
except Exception, e:
self.warn("Error writing state file: %s" % e)
def runhook(self, state, hooks=None):
''' Run hook scripts upon changing states.
If hooks is not specified, run all hooks in the given state.
'''
if state not in self._hooks:
return
if hooks is None:
hooks = self._hooks[state]
for (filename, data) in hooks:
try:
f = open(os.path.join(self.sessiondir, filename), "w")
f.write(data)
f.close()
except Exception, e:
self.warn("Error writing hook '%s': %s" % (filename, e))
self.info("Running hook %s for state %s" % (filename, state))
try:
check_call(["/bin/sh", filename], cwd=self.sessiondir,
env=self.getenviron())
except Exception, e:
self.warn("Error running hook '%s' for state %s: %s" %
(filename, state, e))
def sethook(self, type, filename, srcname, data):
    ''' Store a hook from a received File Message. `type` has the form
        "hookid:state"; the hook runs immediately when its state matches
        the current session state.
    '''
    if srcname is not None:
        raise NotImplementedError
    (hookid, state) = type.split(':')[:2]
    if not state.isdigit():
        self.warn("Error setting hook having state '%s'" % state)
        return
    state = int(state)
    hook = (filename, data)
    if state not in self._hooks:
        self._hooks[state] = [hook,]
    else:
        # BUGFIX: append the (filename, data) tuple as a single entry;
        # the old `+= hook` extended the list with the tuple's elements,
        # breaking the `for (filename, data) in hooks` unpacking in
        # runhook()
        self._hooks[state].append(hook)
    # immediately run a hook if it is in the current state
    # (this allows hooks in the definition and configuration states)
    if self.getstate() == state:
        self.runhook(state, hooks = [hook,])
def delhooks(self):
    ''' Clear the hook scripts dict.
    '''
    # hook files already written to the session dir are not removed
    self._hooks = {}
def run_state_hooks(self, state):
if state not in self._state_hooks:
return
for hook in self._state_hooks[state]:
try:
hook(state)
except Exception, e:
self.warn("ERROR: exception occured when running %s state "
"hook: %s: %s" % (coreapi.state_name(state),
hook, e))
def add_state_hook(self, state, hook):
try:
hooks = self._state_hooks[state]
assert hook not in hooks
hooks.append(hook)
except KeyError:
self._state_hooks[state] = [hook]
if self._state == state:
hook(state)
def del_state_hook(self, state, hook):
    ''' Remove a callable hook for a state; unknown states are ignored.
    '''
    try:
        registered = self._state_hooks[state]
        self._state_hooks[state] = [h for h in registered if h != hook]
    except KeyError:
        pass
def runtime_state_hook(self, state):
    ''' State hook registered in __init__; on entering the runtime state,
        finish EMANE startup and optionally save a deployed-session XML.
    '''
    if state == coreapi.CORE_EVENT_RUNTIME_STATE:
        self.emane.poststartup()
        # use .get(): 'xmlfilever' may be absent from the config dict
        # (e.g. sessions constructed with the default cfg={}), and the
        # old item lookup raised KeyError here
        xmlfilever = self.cfg.get('xmlfilever')
        if xmlfilever in ('1.0',):
            xmlfilename = os.path.join(self.sessiondir,
                                       'session-deployed.xml')
            savesessionxml(self, xmlfilename, xmlfilever)
def getenviron(self, state=True):
    ''' Get an environment suitable for a subprocess.Popen call.
    This is the current process environment with some session-specific
    variables.
    '''
    env = os.environ.copy()
    env['SESSION'] = "%s" % self.sessionid
    env['SESSION_SHORT'] = "%s" % self.shortsessionid()
    env['SESSION_DIR'] = "%s" % self.sessiondir
    env['SESSION_NAME'] = "%s" % self.name
    env['SESSION_FILENAME'] = "%s" % self.filename
    env['SESSION_USER'] = "%s" % self.user
    env['SESSION_NODE_COUNT'] = "%s" % self.node_count
    if state:
        env['SESSION_STATE'] = "%s" % self.getstate()
    # merge in the system-wide environment file, if present
    try:
        readfileintodict(os.path.join(CORE_CONF_DIR, "environment"), env)
    except IOError:
        pass
    # merge in the per-user environment file, if present
    if self.user:
        try:
            readfileintodict(os.path.join('/home', self.user, ".core",
                                          "environment"), env)
        except IOError:
            pass
    return env
def setthumbnail(self, thumbfile):
    ''' Set the thumbnail filename. Move files from /tmp to session dir.
    '''
    if os.path.exists(thumbfile):
        dstfile = os.path.join(self.sessiondir,
                               os.path.basename(thumbfile))
        shutil.move(thumbfile, dstfile)
        self.thumbnail = dstfile
    else:
        self.thumbnail = None
def setuser(self, user):
    ''' Set the username for this session. Update the permissions of the
    session dir to allow the user write access.
    '''
    if user is not None:
        try:
            # chown the session dir to the user's uid, keeping the
            # directory's existing group
            uid = pwd.getpwnam(user).pw_uid
            gid = os.stat(self.sessiondir).st_gid
            os.chown(self.sessiondir, uid, gid)
        except Exception, e:
            self.warn("Failed to set permission on %s: %s" % (self.sessiondir, e))
    self.user = user
    def objs(self):
        ''' Return iterator over the emulation object dictionary.
            Callers elsewhere in this class hold self._objslock while
            iterating, since itervalues() reflects concurrent mutation.
        '''
        return self._objs.itervalues()
def getobjid(self):
''' Return a unique, random object id.
'''
self._objslock.acquire()
while True:
id = random.randint(1, 0xFFFF)
if id not in self._objs:
break
self._objslock.release()
return id
    def addobj(self, cls, *clsargs, **clskwds):
        ''' Add an emulation object.
            Instantiates cls with this session as its first argument,
            registers the instance under its objid, and returns it. When
            the id is already taken, the new object is shut down and
            KeyError is raised.
        '''
        obj = cls(self, *clsargs, **clskwds)
        self._objslock.acquire()
        if obj.objid in self._objs:
            # release before shutdown(), which may do its own locking
            self._objslock.release()
            obj.shutdown()
            raise KeyError, "non-unique object id %s for %s" % (obj.objid, obj)
        self._objs[obj.objid] = obj
        self._objslock.release()
        return obj
    def obj(self, objid):
        ''' Get an emulation object.
            Raises KeyError when no object is registered under objid.
        '''
        if objid not in self._objs:
            raise KeyError, "unknown object id %s" % (objid)
        return self._objs[objid]
    def objbyname(self, name):
        ''' Get an emulation object using its name attribute.
            Linear search over all objects; raises KeyError when no object
            carries the given name.
        '''
        with self._objslock:
            for obj in self.objs():
                if hasattr(obj, "name") and obj.name == name:
                    return obj
        raise KeyError, "unknown object with name %s" % (name)
def delobj(self, objid):
''' Remove an emulation object.
'''
self._objslock.acquire()
try:
o = self._objs.pop(objid)
except KeyError:
o = None
self._objslock.release()
if o:
o.shutdown()
del o
gc.collect()
# print "gc count:", gc.get_count()
# for o in gc.get_objects():
# if isinstance(o, PyCoreObj):
# print "XXX XXX XXX PyCoreObj:", o
# for r in gc.get_referrers(o):
# print "XXX XXX XXX referrer:", gc.get_referrers(o)
def delobjs(self):
''' Clear the _objs dictionary, and call each obj.shutdown() routine.
'''
self._objslock.acquire()
while self._objs:
k, o = self._objs.popitem()
o.shutdown()
self._objslock.release()
    def writeobjs(self):
        ''' Write objects to a 'nodes' file in the session dir.
            The 'nodes' file lists:
                number, name, api-type, class-type
            Any failure (e.g. unwritable session dir) is logged via warn()
            rather than raised.
        '''
        try:
            f = open(os.path.join(self.sessiondir, "nodes"), "w")
            with self._objslock:
                # sorted by object id for a stable, readable file
                for objid in sorted(self._objs.keys()):
                    o = self._objs[objid]
                    f.write("%s %s %s %s\n" % (objid, o.name, o.apitype, type(o)))
            f.close()
        except Exception, e:
            self.warn("Error writing nodes file: %s" % e)
    def addconfobj(self, objname, type, callback):
        ''' Objects can register configuration objects that are included in
        the Register Message and may be configured via the Configure
        Message. The callback is invoked when receiving a Configure Message.

        type must be one of the coreapi.reg_tlvs registration TLV types;
        callback is invoked as callback(session, msg).
        '''
        if type not in coreapi.reg_tlvs:
            raise Exception, "invalid configuration object type"
        self._confobjslock.acquire()
        self._confobjs[objname] = (type, callback)
        self._confobjslock.release()
    def confobj(self, objname, session, msg):
        ''' Invoke the callback for an object upon receipt of a Configure
        Message for that object. A no-op if the object doesn't exist.
        Returns a list of non-None reply messages from the callback(s).
        '''
        replies = []
        self._confobjslock.acquire()
        if objname == "all":
            # invoke every registered callback; the lock is held across
            # all callbacks in this branch
            for objname in self._confobjs:
                (type, callback) = self._confobjs[objname]
                reply = callback(session, msg)
                if reply is not None:
                    replies.append(reply)
            self._confobjslock.release()
            return replies
        if objname in self._confobjs:
            (type, callback) = self._confobjs[objname]
            # released before invoking the callback (unlike the "all"
            # branch above)
            self._confobjslock.release()
            reply = callback(session, msg)
            if reply is not None:
                replies.append(reply)
            return replies
        else:
            self.info("session object doesn't own model '%s', ignoring" % \
                      objname)
            self._confobjslock.release()
            return replies
    def confobjs_to_tlvs(self):
        ''' Turn the configuration objects into a list of Register Message TLVs.
            Returns the concatenated packed TLV data.
        '''
        tlvdata = ""
        self._confobjslock.acquire()
        for objname in self._confobjs:
            (type, callback) = self._confobjs[objname]
            # type must be in coreapi.reg_tlvs
            tlvdata += coreapi.CoreRegTlv.pack(type, objname)
        self._confobjslock.release()
        return tlvdata
    def info(self, msg):
        ''' Utility method for writing output to stdout.
        '''
        print msg
        # flush so output appears immediately, even when redirected
        sys.stdout.flush()
    def warn(self, msg):
        ''' Utility method for writing output to stderr.
        '''
        print >> sys.stderr, msg
        # flush so output appears immediately, even when redirected
        sys.stderr.flush()
    def dumpsession(self):
        ''' Debug print this session.
            Writes a two-line summary (identity/state, then file/thumbnail
            and node counts) via info().
        '''
        self.info("session id=%s name=%s state=%s connected=%s" % \
                  (self.sessionid, self.name, self._state, self.isconnected()))
        num = len(self._objs)
        self.info("  file=%s thumb=%s nc=%s/%s" % \
                  (self.filename, self.thumbnail, self.node_count, num))
def exception(self, level, source, objid, text):
''' Generate an Exception Message
'''
vals = (objid, str(self.sessionid), level, source, time.ctime(), text)
types = ("NODE", "SESSION", "LEVEL", "SOURCE", "DATE", "TEXT")
tlvdata = ""
for (t,v) in zip(types, vals):
if v is not None:
tlvdata += coreapi.CoreExceptionTlv.pack(
eval("coreapi.CORE_TLV_EXCP_%s" % t), v)
msg = coreapi.CoreExceptionMessage.pack(0, tlvdata)
self.warn("exception: %s (%s) %s" % (source, objid, text))
# send Exception Message to connected handlers (e.g. GUI)
self.broadcastraw(None, msg)
def getcfgitem(self, cfgname):
''' Return an entry from the configuration dictionary that comes from
command-line arguments and/or the core.conf config file.
'''
if cfgname not in self.cfg:
return None
else:
return self.cfg[cfgname]
def getcfgitembool(self, cfgname, defaultifnone = None):
''' Return a boolean entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return bool(item.lower() == "true")
def getcfgitemint(self, cfgname, defaultifnone = None):
''' Return an integer entry from the configuration dictionary, may
return None if undefined.
'''
item = self.getcfgitem(cfgname)
if item is None:
return defaultifnone
return int(item)
    def instantiate(self, handler=None):
        ''' We have entered the instantiation state, invoke startup methods
            of various managers and boot the nodes. Validate nodes and check
            for transition to the runtime state.

            handler: optional connection handler passed to bootnodes() for
            node status replies
        '''
        self.writeobjs()
        # controlnet may be needed by some EMANE models
        self.addremovectrlif(node=None, remove=False)
        if self.emane.startup() == self.emane.NOT_READY:
            return # instantiate() will be invoked again upon Emane.configure()
        self.broker.startup()
        self.mobility.startup()
        # boot the services on each node
        self.bootnodes(handler)
        # allow time for processes to start
        time.sleep(0.125)
        self.validatenodes()
        # assume either all nodes have booted already, or there are some
        # nodes on slave servers that will be booted and those servers will
        # send a node status response message
        self.checkruntime()
    def getnodecount(self):
        ''' Returns the number of CoreNodes and CoreNets, except for those
            that are not considered in the GUI's node count.
        '''
        with self._objslock:
            # point-to-point and control networks are excluded from the count
            count = len(filter(lambda(x): \
                               not isinstance(x, (nodes.PtpNet, nodes.CtrlNet)),
                               self.objs()))
            # on Linux, GreTapBridges are auto-created, not part of
            # GUI's node count
            if 'GreTapBridge' in globals():
                count -= len(filter(lambda(x): \
                                 isinstance(x, GreTapBridge) and not \
                                 isinstance(x, nodes.TunnelNode),
                                 self.objs()))
        return count
    def checkruntime(self):
        ''' Check if we have entered the runtime state, that all nodes have been
            started and the emulation is running. Start the event loop once we
            have entered runtime (time=0).
        '''
        # this is called from instantiate() after receiving an event message
        # for the instantiation state, and from the broker when distributed
        # nodes have been started
        if self.node_count is None:
            return
        if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
            return
        session_node_count = int(self.node_count)
        nc = self.getnodecount()
        # count booted nodes not emulated on this server
        # TODO: let slave server determine RUNTIME and wait for Event Message
        # broker.getbootocunt() counts all CoreNodes from status reponse
        # messages, plus any remote WLANs; remote EMANE, hub, switch, etc.
        # are already counted in self._objs
        nc += self.broker.getbootcount()
        self.info("Checking for runtime with %d of %d session nodes" % \
                  (nc, session_node_count))
        if nc < session_node_count:
            return # do not have information on all nodes yet
        # information on all nodes has been received and they have been started
        # enter the runtime state
        # TODO: more sophisticated checks to verify that all nodes and networks
        # are running
        state = coreapi.CORE_EVENT_RUNTIME_STATE
        # start the scheduled-event queue before announcing the state change
        self.evq.run()
        self.setstate(state, info=True, sendevent=True)
    def datacollect(self):
        ''' Tear down a running session. Stop the event loop and any running
            nodes, and perform clean-up.
        '''
        self.evq.stop()
        # stop services on every emulated node
        with self._objslock:
            for obj in self.objs():
                if isinstance(obj, nodes.PyCoreNode):
                    self.services.stopnodeservices(obj)
        self.emane.shutdown()
        self.updatectrlifhosts(remove=True)
        # Remove all four possible control networks. Does nothing if ctrlnet is not installed.
        self.addremovectrlif(node=None, remove=True)
        self.addremovectrlif(node=None, netidx=1, remove=True)
        self.addremovectrlif(node=None, netidx=2, remove=True)
        self.addremovectrlif(node=None, netidx=3, remove=True)
        # self.checkshutdown() is currently invoked from node delete handler
    def checkshutdown(self):
        ''' Check if we have entered the shutdown state, when no running nodes
            and links remain.
            Returns any event replies produced by setstate(), or an empty
            tuple when nodes remain.
        '''
        nc = self.getnodecount()
        # TODO: this doesn't consider slave server node counts
        # wait for slave servers to enter SHUTDOWN state, then master session
        # can enter SHUTDOWN
        replies = ()
        if self.getcfgitembool('verbose', False):
            self.info("Session %d shutdown: %d nodes remaining" % \
                      (self.sessionid, nc))
        if nc == 0:
            replies = self.setstate(state=coreapi.CORE_EVENT_SHUTDOWN_STATE,
                                    info=True, sendevent=True, returnevent=True)
            self.sdt.shutdown()
        return replies
def setmaster(self, handler):
''' Look for the specified handler and set our master flag
appropriately. Returns True if we are connected to the given
handler.
'''
with self._handlerslock:
for h in self._handlers:
if h != handler:
continue
self.master = h.master
return True
return False
def shortsessionid(self):
''' Return a shorter version of the session ID, appropriate for
interface names, where length may be limited.
'''
ssid = (self.sessionid >> 8) ^ (self.sessionid & ((1 << 8) - 1))
return "%x" % ssid
    def sendnodeemuid(self, handler, nodenum):
        ''' Send back node messages to the GUI for node messages that had
            the status request flag.
        '''
        if handler is None:
            return
        if nodenum in handler.nodestatusreq:
            tlvdata = ""
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
                                                nodenum)
            # the emulation id reported back is the node number itself
            tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
                                                nodenum)
            reply = coreapi.CoreNodeMessage.pack(coreapi.CORE_API_ADD_FLAG \
                                                 | coreapi.CORE_API_LOC_FLAG,
                                                 tlvdata)
            try:
                handler.request.sendall(reply)
            except Exception, e:
                self.warn("sendall() for node: %d error: %s" % (nodenum, e))
            # the pending status request has been answered
            del handler.nodestatusreq[nodenum]
    def bootnodes(self, handler):
        ''' Invoke the boot() procedure for all nodes and send back node
            messages to the GUI for node messages that had the status
            request flag.
        '''
        with self._objslock:
            # RJ45 nodes are physical interfaces and are not booted
            for n in self.objs():
                if isinstance(n, nodes.PyCoreNode) and \
                   not isinstance(n, nodes.RJ45Node):
                    # add a control interface if configured
                    self.addremovectrlif(node=n, remove=False)
                    n.boot()
                    self.sendnodeemuid(handler, n.objid)
        self.updatectrlifhosts()
def validatenodes(self):
with self._objslock:
for n in self.objs():
# TODO: this can be extended to validate everything
# such as vnoded process, bridges, etc.
if not isinstance(n, nodes.PyCoreNode):
continue
if isinstance(n, nodes.RJ45Node):
continue
n.validate()
def getctrlnetprefixes(self):
p = getattr(self.options, 'controlnet', self.cfg.get('controlnet'))
p0 = getattr(self.options, 'controlnet0', self.cfg.get('controlnet0'))
p1 = getattr(self.options, 'controlnet1', self.cfg.get('controlnet1'))
p2 = getattr(self.options, 'controlnet2', self.cfg.get('controlnet2'))
p3 = getattr(self.options, 'controlnet3', self.cfg.get('controlnet3'))
if not p0 and p:
p0 = p
return [p0,p1,p2,p3]
def getctrlnetserverintf(self):
d0 = self.cfg.get('controlnetif0')
if d0:
self.warn("controlnet0 cannot be assigned with a host interface")
d1 = self.cfg.get('controlnetif1')
d2 = self.cfg.get('controlnetif2')
d3 = self.cfg.get('controlnetif3')
return [None,d1,d2,d3]
def getctrlnetidx(self, dev):
if dev[0:4] == 'ctrl' and int(dev[4]) in [0,1,2,3]:
idx = int(dev[4])
if idx == 0:
return idx
if idx < 4 and self.getctrlnetprefixes()[idx] is not None:
return idx
return -1
def getctrlnetobj(self, netidx):
oid = "ctrl%dnet" % netidx
return self.obj(oid)
    def addremovectrlnet(self, netidx, remove=False, conf_reqd=True):
        ''' Create a control network bridge as necessary.
        When the remove flag is True, remove the bridge that connects control
        interfaces. The conf_reqd flag, when False, causes a control network
        bridge to be added even if one has not been configured.
        Returns the CtrlNet object, or None when removed or not needed.
        '''
        prefixspeclist = self.getctrlnetprefixes()
        prefixspec = prefixspeclist[netidx]
        if not prefixspec:
            if conf_reqd:
                return None # no controlnet needed
            else:
                prefixspec = nodes.CtrlNet.DEFAULT_PREFIX_LIST[netidx]
        serverintf = self.getctrlnetserverintf()[netidx]
        # return any existing controlnet bridge
        try:
            ctrlnet = self.getctrlnetobj(netidx)
            if remove:
                self.delobj(ctrlnet.objid)
                return None
            return ctrlnet
        except KeyError:
            if remove:
                return None
        # no existing bridge: fall through and build one below
        # build a new controlnet bridge
        oid = "ctrl%dnet" % netidx
        # use the updown script for control net 0 only.
        updown_script = None
        if netidx == 0:
            try:
                if self.cfg['controlnet_updown_script']:
                    updown_script = self.cfg['controlnet_updown_script']
            except KeyError:
                pass
            # Check if session option set, overwrite if so
            if hasattr(self.options, 'controlnet_updown_script'):
                new_uds = self.options.controlnet_updown_script
                if new_uds:
                    updown_script = new_uds
        prefixes = prefixspec.split()
        if len(prefixes) > 1:
            # A list of per-host prefixes is provided
            assign_address = True
            if self.master:
                try:
                    # split first (master) entry into server and prefix
                    prefix = prefixes[0].split(':', 1)[1]
                except IndexError:
                    # no server name. possibly only one server
                    prefix = prefixes[0]
            else:
                # slave servers have their name and localhost in the serverlist
                servers = self.broker.getserverlist()
                servers.remove('localhost')
                prefix = None
                for server_prefix in prefixes:
                    try:
                        # split each entry into server and prefix
                        server, p = server_prefix.split(':')
                    except ValueError:
                        server = ""
                        p = None
                    if server == servers[0]:
                        # the server name in the list matches this server
                        prefix = p
                        break
                if not prefix:
                    # no match: report the error and fall back to the first
                    # prefix, without assigning an address on this server
                    msg = "Control network prefix not found for server '%s'" % \
                          servers[0]
                    self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
                                   "Session.addremovectrlnet()", None, msg)
                    assign_address = False
                    try:
                        prefix = prefixes[0].split(':', 1)[1]
                    except IndexError:
                        prefix = prefixes[0]
        else: # len(prefixes) == 1
            # TODO: can we get the server name from the servers.conf or from the node assignments?
            # with one prefix, only master gets a ctrlnet address
            assign_address = self.master
            prefix = prefixes[0]
        ctrlnet = self.addobj(cls=nodes.CtrlNet, objid=oid, prefix=prefix,
                              assign_address=assign_address,
                              updown_script=updown_script, serverintf=serverintf)
        # tunnels between controlnets will be built with Broker.addnettunnels()
        self.broker.addnet(oid)
        for server in self.broker.getserverlist():
            self.broker.addnodemap(server, oid)
        return ctrlnet
    def addremovectrlif(self, node, netidx=0, remove=False, conf_reqd=True):
        ''' Add a control interface to a node when a 'controlnet' prefix is
            listed in the config file or session options. Uses
            addremovectrlnet() to build or remove the control bridge.
            If conf_reqd is False, the control network may be built even
            when the user has not configured one (e.g. for EMANE.)
        '''
        ctrlnet = self.addremovectrlnet(netidx, remove, conf_reqd)
        if ctrlnet is None:
            return
        if node is None:
            return
        if node.netif(ctrlnet.CTRLIF_IDX_BASE + netidx):
            return # ctrl# already exists
        # the node's object id doubles as its host index within the prefix
        ctrlip = node.objid
        try:
            addrlist = ["%s/%s" % (ctrlnet.prefix.addr(ctrlip),
                                   ctrlnet.prefix.prefixlen)]
        except ValueError:
            msg = "Control interface not added to node %s. " % node.objid
            msg += "Invalid control network prefix (%s). " % ctrlnet.prefix
            msg += "A longer prefix length may be required for this many nodes."
            node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
                           "Session.addremovectrlif()", msg)
            return
        ifi = node.newnetif(net = ctrlnet, ifindex = ctrlnet.CTRLIF_IDX_BASE + netidx,
                            ifname = "ctrl%d" % netidx, hwaddr = MacAddr.random(),
                            addrlist = addrlist)
        # mark the interface as a control interface
        node.netif(ifi).control = True
def updatectrlifhosts(self, netidx=0, remove=False):
''' Add the IP addresses of control interfaces to the /etc/hosts file.
'''
if not self.getcfgitembool('update_etc_hosts', False):
return
try:
ctrlnet = self.getctrlnetobj(netidx)
except KeyError:
return
header = "CORE session %s host entries" % self.sessionid
if remove:
if self.getcfgitembool('verbose', False):
self.info("Removing /etc/hosts file entries.")
filedemunge('/etc/hosts', header)
return
entries = []
for ifc in ctrlnet.netifs():
name = ifc.node.name
for addr in ifc.addrlist:
entries.append("%s %s" % (addr.split('/')[0], ifc.node.name))
if self.getcfgitembool('verbose', False):
self.info("Adding %d /etc/hosts file entries." % len(entries))
filemunge('/etc/hosts', header, '\n'.join(entries) + '\n')
def runtime(self):
''' Return the current time we have been in the runtime state, or zero
if not in runtime.
'''
if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
return time.time() - self._time
else:
return 0.0
def addevent(self, etime, node=None, name=None, data=None):
''' Add an event to the event queue, with a start time relative to the
start of the runtime state.
'''
etime = float(etime)
runtime = self.runtime()
if runtime > 0.0:
if time <= runtime:
self.warn("Could not schedule past event for time %s " \
"(run time is now %s)" % (time, runtime))
return
etime = etime - runtime
func = self.runevent
self.evq.add_event(etime, func, node=node, name=name, data=data)
if name is None:
name = ""
self.info("scheduled event %s at time %s data=%s" % \
(name, etime + runtime, data))
def runevent(self, node=None, name=None, data=None):
''' Run a scheduled event, executing commands in the data string.
'''
now = self.runtime()
if name is None:
name = ""
self.info("running event %s at time %s cmd=%s" % (name, now, data))
if node is None:
mutedetach(shlex.split(data))
else:
n = self.obj(node)
n.cmd(shlex.split(data), wait=False)
    def sendobjs(self):
        ''' Return API messages that describe the current session.
            Builds, in order: node messages, link messages, model
            configuration, service customizations (with their files),
            hook scripts, and session options/metadata.
        '''
        replies = []
        nn = 0
        # send node messages for node and network objects
        with self._objslock:
            for obj in self.objs():
                msg = obj.tonodemsg(flags = coreapi.CORE_API_ADD_FLAG)
                if msg is not None:
                    replies.append(msg)
                    nn += 1
        nl = 0
        # send link messages from net objects
        with self._objslock:
            for obj in self.objs():
                linkmsgs = obj.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
                for msg in linkmsgs:
                    replies.append(msg)
                    nl += 1
        # send model info
        configs = self.mobility.getallconfigs()
        configs += self.emane.getallconfigs()
        for (nodenum, cls, values) in configs:
            #cls = self.mobility._modelclsmap[conftype]
            msg = cls.toconfmsg(flags=0, nodenum=nodenum,
                                typeflags=coreapi.CONF_TYPE_FLAGS_UPDATE,
                                values=values)
            replies.append(msg)
        # service customizations
        svc_configs = self.services.getallconfigs()
        for (nodenum, svc) in svc_configs:
            opaque = "service:%s" % svc._name
            tlvdata = ""
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
                                                nodenum)
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
                                                opaque)
            # a synthetic request message drives configure_request()
            tmp = coreapi.CoreConfMessage(flags=0, hdr="", data=tlvdata)
            replies.append(self.services.configure_request(tmp))
            for (filename, data) in self.services.getallfiles(svc):
                flags = coreapi.CORE_API_ADD_FLAG
                tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE,
                                                   nodenum)
                tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
                                                    str(filename))
                tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
                                                    opaque)
                tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
                                                    str(data))
                replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
        # TODO: send location info
        # replies.append(self.location.toconfmsg())
        # send hook scripts
        for state in sorted(self._hooks.keys()):
            for (filename, data) in self._hooks[state]:
                flags = coreapi.CORE_API_ADD_FLAG
                tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME,
                                                   str(filename))
                tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE,
                                                    "hook:%s" % state)
                tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA,
                                                    str(data))
                replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata))
        # send meta data
        tmp = coreapi.CoreConfMessage(flags=0, hdr="", data="")
        opts = self.options.configure_request(tmp,
                                              typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
        if opts:
            replies.append(opts)
        meta = self.metadata.configure_request(tmp,
                                               typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE)
        if meta:
            replies.append(meta)
        self.info("informing GUI about %d nodes and %d links" % (nn, nl))
        return replies
class SessionConfig(ConfigurableManager, Configurable):
    ''' Session options exposed to the GUI as a configurable "session"
        object: control network settings, RJ45 enablement, session dir
        preservation, and SDT3D output.
    '''
    _name = 'session'
    _type = coreapi.CORE_TLV_REG_UTILITY
    # (name, data type, default value, possible values, label)
    _confmatrix = [
        ("controlnet", coreapi.CONF_DATA_TYPE_STRING, '', '',
         'Control network'),
        ("controlnet_updown_script", coreapi.CONF_DATA_TYPE_STRING, '', '',
         'Control network script'),
        ("enablerj45", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off',
         'Enable RJ45s'),
        ("preservedir", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
         'Preserve session dir'),
        ("enablesdt", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off',
         'Enable SDT3D output'),
        ("sdturl", coreapi.CONF_DATA_TYPE_STRING, Sdt.DEFAULT_SDT_URL, '',
         'SDT3D URL'),
    ]
    _confgroups = "Options:1-%d" % len(_confmatrix)

    def __init__(self, session):
        ConfigurableManager.__init__(self, session)
        # participate in broker message handling for distributed sessions
        session.broker.handlers += (self.handledistributed, )
        self.reset()

    def reset(self):
        ''' (Re)initialize each option as an attribute on this object,
            preferring config file values over _confmatrix defaults.
        '''
        defaults = self.getdefaultvalues()
        for k in self.getnames():
            # value may come from config file
            v = self.session.getcfgitem(k)
            if v is None:
                v = self.valueof(k, defaults)
            v = self.offontobool(v)
            setattr(self, k, v)

    def configure_values(self, msg, values):
        ''' Apply key=value pairs from a Configure Message to attributes.
        '''
        return self.configure_values_keyvalues(msg, values, self,
                                               self.getnames())

    def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
        ''' Build a Configure Message reply carrying current option values.
        '''
        nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
        values = []
        for k in self.getnames():
            v = getattr(self, k)
            if v is None:
                v = ""
            values.append("%s" % v)
        return self.toconfmsg(0, nodenum, typeflags, values)

    def handledistributed(self, msg):
        ''' Handle the session options config message as it has reached the
        broker. Options requiring modification for distributed operation should
        be handled here.
        '''
        if not self.session.master:
            return
        if msg.msgtype != coreapi.CORE_API_CONF_MSG or \
           msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) != "session":
            return
        values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
        if values_str is None:
            return
        values = values_str.split('|')
        if not self.haskeyvalues(values):
            return
        for v in values:
            key, value = v.split('=', 1)
            if key == "controlnet":
                self.handledistributedcontrolnet(msg, values, values.index(v))

    def handledistributedcontrolnet(self, msg, values, idx):
        ''' Modify Config Message if multiple control network prefixes are
        defined. Map server names to prefixes and repack the message before
        it is forwarded to slave servers.
        '''
        kv = values[idx]
        key, value = kv.split('=', 1)
        controlnets = value.split()
        if len(controlnets) < 2:
            return # multiple controlnet prefixes do not exist
        servers = self.session.broker.getserverlist()
        if len(servers) < 2:
            return # not distributed
        servers.remove("localhost")
        servers.insert(0, "localhost") # master always gets first prefix
        # create list of "server1:ctrlnet1 server2:ctrlnet2 ..."
        controlnets = map(lambda(x): "%s:%s" % (x[0],x[1]),
                          zip(servers, controlnets))
        values[idx] = "controlnet=%s" % (' '.join(controlnets))
        values_str = '|'.join(values)
        # rewrite the TLV in place and repack the wire message
        msg.tlvdata[coreapi.CORE_TLV_CONF_VALUES] = values_str
        msg.repack()
class SessionMetaData(ConfigurableManager):
    ''' Metadata is simply stored in a configs[] dict. Key=value pairs are
        passed in from configure messages destined to the "metadata" object.
        The data is not otherwise interpreted or processed.
    '''
    _name = "metadata"
    _type = coreapi.CORE_TLV_REG_UTILITY

    def configure_values(self, msg, values):
        ''' Store '|'-separated key=value pairs from a Configure Message.
            Raises ValueError on a malformed pair.
        '''
        if values is None:
            return None
        kvs = values.split('|')
        for kv in kvs:
            try:
                (key, value) = kv.split('=', 1)
            except ValueError:
                raise ValueError, "invalid key in metdata: %s" % kv
            self.additem(key, value)
        return None

    def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE):
        ''' Build a Configure Message reply carrying all stored metadata.
        '''
        nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
        values_str = "|".join(map(lambda(k,v): "%s=%s" % (k,v), self.items()))
        return self.toconfmsg(0, nodenum, typeflags, values_str)

    def toconfmsg(self, flags, nodenum, typeflags, values_str):
        ''' Pack a metadata Configure Message; every value is typed as a
            string.
        '''
        tlvdata = ""
        if nodenum is not None:
            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
                                                nodenum)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
                                            self._name)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
                                            typeflags)
        # one string data type entry per stored item
        datatypes = tuple( map(lambda(k,v): coreapi.CONF_DATA_TYPE_STRING,
                               self.items()) )
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
                                            datatypes)
        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
                                            values_str)
        msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
        return msg

    def additem(self, key, value):
        ''' Store (or overwrite) a single metadata entry.
        '''
        self.configs[key] = value

    def items(self):
        ''' Return an iterator over (key, value) metadata pairs.
        '''
        return self.configs.iteritems()
atexit.register(Session.atexit)
| bsd-2-clause |
gavrieltal/opencog | opencog/embodiment/Monitor/emotion_space_browser.py | 17 | 8987 | import numpy as np
import zmq
import json
import matplotlib as mpl
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
# configure the matplotlib settings
#mpl.rcParams['legend.fontsize'] = 10
from PyQt4 import QtGui, QtCore
from common import *
class ZmqMultiFiltersSubscriberThread(QThread):
    ''' Qt thread that subscribes to several ZeroMQ filters on one endpoint
        and forwards each received JSON payload (tagged with its filter
        name) to the owning widget via a Qt signal.
    '''

    data_update_signal = pyqtSignal(dict)

    def __init__(self, widget, publish_endpoint, filter_list,
                 zmq_context = glb.zmq_context, parent = None):
        """
        widget should contains the slot method as below:
            @pyqtSlot(dict)
            def handle_data_update(self, json_dict):
                # Some code here to process the data in json format

        publish_endpoint tells the subscriber where the message source is

        filter_list: list of subscription filter strings, one per source
        """

        # Initialize the thread
        QThread.__init__(self)

        # Connect the signal with the handler residing in widget
        self.widget = widget
        self.data_update_signal.connect(self.widget.handle_data_update)

        # Initialize the ZeroMQ socket
        self.socket = zmq_context.socket(zmq.SUB)

        self.filter_list = filter_list
        for filter_name in self.filter_list:
            self.socket.setsockopt(zmq.SUBSCRIBE, filter_name)

        self.socket.connect(publish_endpoint)

    def run(self):
        """
        Receive the message with matching filter_key from publish_endpoint
        via ZeroMQ, discard the filter_key message and emit the signal to
        corresponding handler with the actual data wrapped in python dictionary
        """
        while True:
            message = self.socket.recv()

            # if the message contains only filter key, discard it
            if message in self.filter_list:
                self.latest_msg_filter = message
                continue

            # Unpack the message into python dictionary
            # NOTE(review): assumes a filter-key message always arrives
            # before the first data message; otherwise latest_msg_filter
            # below would be unset -- confirm against the publisher
            json_dict = json.loads(message)

            # Apply a filter name to this data dictionary, in order to distinguish it
            json_dict['filter_key'] = self.latest_msg_filter

            # Emit the signal which would evoke the corresponding handler
            self.data_update_signal.emit(json_dict)
class EmotionSpace(FigureCanvas):
    ''' Matplotlib canvas plotting the trajectory of dominant feelings in a
        3-d space whose axes are three psi modulators. Data arrives over
        ZeroMQ from the PsiFeelingUpdaterAgent and PsiModulatorUpdaterAgent
        publishers.
    '''

    def __init__(self, publish_endpoint, parent=None, width=5, height=4, dpi=100):

        # Initialize a cache for incoming data.
        self.max_data_len = 25

        ## Feeling dictionary stores dominant feelings with different timestamps.
        ## Format: { timestamp -> dominant_feeling_name }
        self.feeling_dict = {}

        ## Modulator dictionary caches modulators value at different time points.
        ## Format:
        ## { modulator_name -> { timestamp -> modulator_value } }
        self.modulator_dict = {}

        # The modulator dicitonary should be initialized in the format before
        # appending data.
        self.has_modulator_dict_initialized = False

        # The legend list used to show legend in the chart
        self.legend_list = []

        # Chosen 3 modulators to be the axes of 3-dimensional space.
        self.modulator_axes = []
        self.axes_group_box = QtGui.QGroupBox("Modulator Axes:")

        # Initialize variables related to graphics.
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.axes = Axes3D(self.fig)
        self.axes.hold(False)

        FigureCanvas.__init__(self, self.fig)

        self.setParent(parent)

        FigureCanvas.setSizePolicy(self,
                                   QtGui.QSizePolicy.Expanding,
                                   QtGui.QSizePolicy.Expanding
                                  )
        FigureCanvas.updateGeometry(self)

        # Create and start ZeroMQ subscriber threads
        self.zmq_sub_thread = ZmqMultiFiltersSubscriberThread(self,
                                                              publish_endpoint,
                                                              [
                                                               "PsiFeelingUpdaterAgent",
                                                               "PsiModulatorUpdaterAgent"
                                                              ]
                                                             )
        self.zmq_sub_thread.start()

    # Initialize modulator dictionary and legend list
    def initialize_modulator_dict(self, json_dict):
        ''' Seed modulator_dict and modulator_axes from the first modulator
            message; the first three keys seen become the 3-d axes.
        '''
        timestamp = json_dict['timestamp']
        del json_dict['timestamp']

        for k, v in json_dict.iteritems():
            self.modulator_dict[k] = {}
            self.modulator_dict[k][timestamp] = v

            self.modulator_axes.append(k)

        self.has_modulator_dict_initialized = True

        #self.initialize_axes_group()

    def initialize_axes_group(self):
        # NOTE(review): dead code -- the only call site above is commented
        # out, and this method references self.emotion_space/self.axes_group,
        # which are not defined on this class (only axes_group_box is)
        vLayout = QtGui.QVBoxLayout()

        for axes in self.emotion_space.get_axes_list():
            vLayout.addWidget(QtGui.QCheckBox(axes))

        self.axes_group.setLayout(vLayout)
        axesGroupSizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum, True)
        self.axes_group.setSizePolicy(axesGroupSizePolicy)
        self.layout().insert(self.axes_group, 0)

    def update_data(self, json_dict):
        '''
        Update the data of feeling and modulators.
        As the parameter might be either feeling data or modulator data,
        we return the state of the updating result, in which, 0 indicates
        feeling dictionary has been updated, 1 indicates modulator dictionary
        has been updated.
        '''
        if json_dict['filter_key'] == "PsiFeelingUpdaterAgent":
            # Just leave feelings alone
            del json_dict['filter_key']
            timestamp = json_dict['timestamp']
            del json_dict['timestamp']

            # Get the feeling name with max value
            dominant_feeling = max(json_dict, key = lambda k : json_dict.get(k))

            # Cache the pair in the feeling dictionary
            self.feeling_dict[timestamp] = dominant_feeling

            # return state 0
            return 0

        elif json_dict['filter_key'] == "PsiModulatorUpdaterAgent":
            # Remove filter key pair
            del json_dict['filter_key']

            # the first modulator message defines the axes; it is consumed
            # without producing an update state
            if not self.has_modulator_dict_initialized:
                self.initialize_modulator_dict(json_dict)
                return

            timestamp = json_dict['timestamp']
            del json_dict['timestamp']

            for k, v in json_dict.iteritems():
                self.modulator_dict[k][timestamp] = v

            # return state 1
            return 1

        else:
            pass

    @pyqtSlot(dict)
    def handle_data_update(self, json_dict):
        """
        Process the data in json format
        """
        update_state = self.update_data(json_dict)

        # Only update the graphic when the widget is visible and
        # modulator data has been updated.
        if self.isVisible() and update_state == 1:
            self.do_draw()

    def do_draw(self):
        ''' Redraw the 3-d trajectory: one point per cached feeling
            timestamp, using the first three modulators as coordinates.
        '''
        self.axes.clear()

        X = []
        Y = []
        Z = []

        m = self.modulator_axes

        print '=========='
        for k, v in self.feeling_dict.iteritems():
            X.append(self.modulator_dict[m[0]][k])
            Y.append(self.modulator_dict[m[1]][k])
            Z.append(self.modulator_dict[m[2]][k])
            print str(self.modulator_dict[m[0]][k]) + ':' \
                  + str(self.modulator_dict[m[1]][k]) + ':' \
                  + str(self.modulator_dict[m[2]][k])
        print '=========='

        self.axes.grid(True)
        self.axes.plot(X, Y, Z, '-o')
        self.draw()

    def get_axes_list(self):
        ''' Return the list of modulator names used as plot axes.
        '''
        return self.modulator_axes
class EmotionSpaceExplorer(QtGui.QWidget):
    ''' Top-level widget combining the EmotionSpace 3-d canvas with a
        matplotlib navigation toolbar, laid out vertically.
        NOTE(review): the ZeroMQ publish endpoint is hard-coded to
        tcp://192.168.1.250:18002 -- confirm this matches the deployment.
    '''
    def __init__(self, parent=None):
        super(EmotionSpaceExplorer, self).__init__(parent)

        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding, True)
        sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
        self.setSizePolicy(sizePolicy)

        self.emotion_space = EmotionSpace("tcp://192.168.1.250:18002", self)

        self.navigation_toolbar = NavigationToolbar(self.emotion_space, self)

        mainLayout = QtGui.QVBoxLayout(self)
        mainLayout.addWidget(self.emotion_space)
        mainLayout.addWidget(self.navigation_toolbar)
        self.setLayout(mainLayout)
| agpl-3.0 |
zigitax/pupy | pupy/packages/windows/amd64/psutil/_common.py | 68 | 6995 | # /usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common objects shared by all _ps* modules."""
from __future__ import division
import errno
import functools
import os
import socket
import stat
import sys
from collections import namedtuple
from socket import AF_INET, SOCK_STREAM, SOCK_DGRAM
try:
import threading
except ImportError:
import dummy_threading as threading
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# --- constants

# Optional address families: absent on some platforms, so fall back to None.
AF_INET6 = getattr(socket, 'AF_INET6', None)
AF_UNIX = getattr(socket, 'AF_UNIX', None)

# Canonical process-status strings shared by all platform implementations.
STATUS_RUNNING = "running"
STATUS_SLEEPING = "sleeping"
STATUS_DISK_SLEEP = "disk-sleep"
STATUS_STOPPED = "stopped"
STATUS_TRACING_STOP = "tracing-stop"
STATUS_ZOMBIE = "zombie"
STATUS_DEAD = "dead"
STATUS_WAKE_KILL = "wake-kill"
STATUS_WAKING = "waking"
STATUS_IDLE = "idle"  # BSD
STATUS_LOCKED = "locked"  # BSD
STATUS_WAITING = "waiting"  # BSD

# TCP connection-state strings used by *.connections()/net_connections().
CONN_ESTABLISHED = "ESTABLISHED"
CONN_SYN_SENT = "SYN_SENT"
CONN_SYN_RECV = "SYN_RECV"
CONN_FIN_WAIT1 = "FIN_WAIT1"
CONN_FIN_WAIT2 = "FIN_WAIT2"
CONN_TIME_WAIT = "TIME_WAIT"
CONN_CLOSE = "CLOSE"
CONN_CLOSE_WAIT = "CLOSE_WAIT"
CONN_LAST_ACK = "LAST_ACK"
CONN_LISTEN = "LISTEN"
CONN_CLOSING = "CLOSING"
CONN_NONE = "NONE"

# NIC duplex constants: an IntEnum where the enum module exists
# (Python >= 3.4), plain module-level integers otherwise.
if enum is None:
    NIC_DUPLEX_FULL = 2
    NIC_DUPLEX_HALF = 1
    NIC_DUPLEX_UNKNOWN = 0
else:
    class NicDuplex(enum.IntEnum):
        NIC_DUPLEX_FULL = 2
        NIC_DUPLEX_HALF = 1
        NIC_DUPLEX_UNKNOWN = 0

    # Re-export the members so both branches provide NIC_DUPLEX_* at
    # module scope.
    globals().update(NicDuplex.__members__)
# --- functions
def usage_percent(used, total, _round=None):
    """Return ``used`` as a percentage of ``total``.

    A zero ``total`` yields 0 instead of raising ZeroDivisionError.
    When ``_round`` is given, the result is rounded to that many digits.
    """
    try:
        percent = (used / total) * 100
    except ZeroDivisionError:
        percent = 0
    if _round is None:
        return percent
    return round(percent, _round)
def memoize(fun):
    """A simple memoize decorator for functions supporting (hashable)
    positional arguments.

    It also provides a cache_clear() function for clearing the cache:

    >>> @memoize
    ... def foo():
    ...     return 1
    ...
    >>> foo()
    1
    >>> foo.cache_clear()
    >>>
    """
    @functools.wraps(fun)
    def wrapper(*args, **kwargs):
        # kwargs are folded into a frozenset so the key is hashable and
        # independent of keyword order.
        key = (args, frozenset(sorted(kwargs.items())))
        # "with lock:" replaces the manual acquire/try/finally/release of
        # the original -- same locking behavior, idiomatic form.
        with lock:
            try:
                return cache[key]
            except KeyError:
                ret = cache[key] = fun(*args, **kwargs)
                return ret

    def cache_clear():
        """Clear cache."""
        with lock:
            cache.clear()

    lock = threading.RLock()
    cache = {}
    wrapper.cache_clear = cache_clear
    return wrapper
def isfile_strict(path):
    """Same as os.path.isfile() but does not swallow EACCES / EPERM
    exceptions, see:
    http://mail.python.org/pipermail/python-dev/2012-June/120787.html
    """
    try:
        mode = os.stat(path).st_mode
    except OSError as err:
        # Permission problems propagate; any other stat failure (missing
        # file, dangling symlink, ...) simply means "not a regular file".
        if err.errno not in (errno.EPERM, errno.EACCES):
            return False
        raise
    return stat.S_ISREG(mode)
def sockfam_to_enum(num):
    """Convert a numeric socket family value to an IntEnum member.

    Unknown values (and interpreters without the enum module) come back
    as the plain number unchanged.
    """
    if enum is None:  # interpreter predates the enum module
        return num
    try:
        fam = socket.AddressFamily(num)
    except (ValueError, AttributeError):
        return num
    return fam
def socktype_to_enum(num):
    """Convert a numeric socket type value to an IntEnum member.

    If it's not a known member (or enums are unavailable), return the
    numeric value itself.
    """
    if enum is None:
        return num
    try:
        # The IntEnum for socket types is socket.SocketKind (Python 3.4+).
        # The previous lookup used socket.AddressType, which has never
        # existed, so the AttributeError was swallowed and the conversion
        # silently never happened.
        return socket.SocketKind(num)
    except (ValueError, AttributeError):
        return num
# --- Process.connections() 'kind' parameter mapping

# Maps each accepted "kind" string to a (families, types) pair used to
# filter connections.
conn_tmap = {
    "all": ([AF_INET, AF_INET6, AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
    "tcp": ([AF_INET, AF_INET6], [SOCK_STREAM]),
    "tcp4": ([AF_INET], [SOCK_STREAM]),
    "udp": ([AF_INET, AF_INET6], [SOCK_DGRAM]),
    "udp4": ([AF_INET], [SOCK_DGRAM]),
    "inet": ([AF_INET, AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
    "inet4": ([AF_INET], [SOCK_STREAM, SOCK_DGRAM]),
    "inet6": ([AF_INET6], [SOCK_STREAM, SOCK_DGRAM]),
}
# IPv6 and UNIX-socket kinds exist only where the platform defines the
# corresponding address family.
if AF_INET6 is not None:
    conn_tmap.update({
        "tcp6": ([AF_INET6], [SOCK_STREAM]),
        "udp6": ([AF_INET6], [SOCK_DGRAM]),
    })
if AF_UNIX is not None:
    conn_tmap.update({
        "unix": ([AF_UNIX], [SOCK_STREAM, SOCK_DGRAM]),
    })
# Remove the bare aliases from the module namespace now that the map is
# built.
del AF_INET, AF_INET6, AF_UNIX, SOCK_STREAM, SOCK_DGRAM
# --- namedtuples for psutil.* system-related functions
# Each comment below names the psutil API whose return value the tuple
# models.

# psutil.swap_memory()
sswap = namedtuple('sswap', ['total', 'used', 'free', 'percent', 'sin',
                             'sout'])
# psutil.disk_usage()
sdiskusage = namedtuple('sdiskusage', ['total', 'used', 'free', 'percent'])
# psutil.disk_io_counters()
sdiskio = namedtuple('sdiskio', ['read_count', 'write_count',
                                 'read_bytes', 'write_bytes',
                                 'read_time', 'write_time'])
# psutil.disk_partitions()
sdiskpart = namedtuple('sdiskpart', ['device', 'mountpoint', 'fstype', 'opts'])
# psutil.net_io_counters()
snetio = namedtuple('snetio', ['bytes_sent', 'bytes_recv',
                               'packets_sent', 'packets_recv',
                               'errin', 'errout',
                               'dropin', 'dropout'])
# psutil.users()
suser = namedtuple('suser', ['name', 'terminal', 'host', 'started'])
# psutil.net_connections()
sconn = namedtuple('sconn', ['fd', 'family', 'type', 'laddr', 'raddr',
                             'status', 'pid'])
# psutil.net_if_addrs()
snic = namedtuple('snic', ['family', 'address', 'netmask', 'broadcast', 'ptp'])
# psutil.net_if_stats()
snicstats = namedtuple('snicstats', ['isup', 'duplex', 'speed', 'mtu'])

# --- namedtuples for psutil.Process methods

# psutil.Process.memory_info()
pmem = namedtuple('pmem', ['rss', 'vms'])
# psutil.Process.cpu_times()
pcputimes = namedtuple('pcputimes', ['user', 'system'])
# psutil.Process.open_files()
popenfile = namedtuple('popenfile', ['path', 'fd'])
# psutil.Process.threads()
pthread = namedtuple('pthread', ['id', 'user_time', 'system_time'])
# psutil.Process.uids()
puids = namedtuple('puids', ['real', 'effective', 'saved'])
# psutil.Process.gids()
pgids = namedtuple('pgids', ['real', 'effective', 'saved'])
# psutil.Process.io_counters()
pio = namedtuple('pio', ['read_count', 'write_count',
                         'read_bytes', 'write_bytes'])
# psutil.Process.ionice()
pionice = namedtuple('pionice', ['ioclass', 'value'])
# psutil.Process.ctx_switches()
pctxsw = namedtuple('pctxsw', ['voluntary', 'involuntary'])
# psutil.Process.connections()
pconn = namedtuple('pconn', ['fd', 'family', 'type', 'laddr', 'raddr',
                             'status'])
| bsd-3-clause |
konstruktoid/ansible-upstream | test/units/modules/cloud/amazon/test_api_gateway.py | 45 | 2355 | #
# (c) 2016 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import sys
import pytest
from ansible.module_utils.ec2 import HAS_BOTO3
from units.modules.utils import set_module_args
if not HAS_BOTO3:
pytestmark = pytest.mark.skip("test_api_gateway.py requires the `boto3` and `botocore` modules")
import ansible.modules.cloud.amazon.aws_api_gateway as agw
# Last kwargs captured by fake_exit_json (module-level so tests can read it).
exit_return_dict = {}


def fake_exit_json(self, **kwargs):
    """Record the kwargs given to exit_json rather than putting them out to
    stdout, then terminate like the real exit_json would."""
    global exit_return_dict
    exit_return_dict = kwargs
    raise SystemExit(0)
def test_upload_api(monkeypatch):
    """main() must pass the swagger text to put_rest_api unchanged.

    boto3_conn is monkeypatched so no AWS call is made, and exit_json is
    replaced by fake_exit_json, which records its kwargs and raises
    SystemExit.
    """
    class FakeConnection:
        def put_rest_api(self, *args, **kwargs):
            # The module must forward the swagger body verbatim.
            assert kwargs["body"] == "the-swagger-text-is-fake"
            return {"msg": "success!"}
    def return_fake_connection(*args, **kwargs):
        return FakeConnection()
    monkeypatch.setattr(agw, "boto3_conn", return_fake_connection)
    monkeypatch.setattr(agw.AnsibleModule, "exit_json", fake_exit_json)
    set_module_args({
        "api_id": "fred",
        "state": "present",
        "swagger_text": "the-swagger-text-is-fake",
        "region": 'mars-north-1',
    })
    with pytest.raises(SystemExit):
        agw.main()
    assert exit_return_dict["changed"]
def test_warn_if_region_not_specified():
    """main() is expected to raise SystemExit when no region is supplied.

    NOTE(review): the module args here (runtime/role/handler) look copied
    from a Lambda-module test rather than api-gateway arguments -- confirm
    the intent.
    """
    set_module_args({
        "name": "aws_api_gateway",
        "state": "present",
        "runtime": 'python2.7',
        "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
        "handler": 'lambda_python.my_handler'})
    with pytest.raises(SystemExit):
        print(agw.main())
| gpl-3.0 |
eugene1g/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/patchlog.py | 119 | 3107 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from google.appengine.ext import db
class PatchLog(db.Model):
    """Datastore record tracking one patch attachment's trip through a queue.

    Keyed by "<attachment_id>-<queue_name>" (see _generate_key), so there
    is at most one log entry per attachment per queue.
    """
    attachment_id = db.IntegerProperty()
    queue_name = db.StringProperty()
    date = db.DateTimeProperty(auto_now_add=True)  # creation time (UTC)
    bot_id = db.StringProperty()
    retry_count = db.IntegerProperty(default=0)
    status_update_count = db.IntegerProperty(default=0)
    finished = db.BooleanProperty(default=False)
    wait_duration = db.IntegerProperty()  # seconds spent queued
    process_duration = db.IntegerProperty()  # seconds spent processing
    @classmethod
    def lookup(cls, attachment_id, queue_name):
        """Fetch the entry for this attachment/queue, creating it if absent."""
        key = cls._generate_key(attachment_id, queue_name)
        return cls.get_or_insert(key, attachment_id=attachment_id, queue_name=queue_name)
    @classmethod
    def lookup_if_exists(cls, attachment_id, queue_name):
        """Fetch the entry for this attachment/queue, or None if absent."""
        key = cls._generate_key(attachment_id, queue_name)
        return cls.get_by_key_name(key)
    def calculate_wait_duration(self):
        """Record seconds elapsed since creation as the wait duration."""
        time_delta = datetime.utcnow() - self.date
        self.wait_duration = int(self._time_delta_to_seconds(time_delta))
    def calculate_process_duration(self):
        """Record total elapsed time minus the already-recorded wait time."""
        time_delta = datetime.utcnow() - self.date
        self.process_duration = int(self._time_delta_to_seconds(time_delta)) - (self.wait_duration or 0)
    @classmethod
    def _generate_key(cls, attachment_id, queue_name):
        # Stable datastore key name for one attachment/queue pair.
        return "%s-%s" % (attachment_id, queue_name)
    # Needed to support Python 2.5's lack of timedelta.total_seconds().
    # Microseconds are deliberately ignored.
    @classmethod
    def _time_delta_to_seconds(cls, time_delta):
        return time_delta.seconds + time_delta.days * 24 * 3600
| bsd-3-clause |
tastynoodle/django | tests/admin_changelist/models.py | 64 | 2568 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Test models exercising various changelist behaviors: FK/M2M relations,
# through-models, inheritance, ordering via Meta and via a custom Manager.
class Event(models.Model):
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")
class Parent(models.Model):
    name = models.CharField(max_length=128)
class Child(models.Model):
    parent = models.ForeignKey(Parent, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)
class Genre(models.Model):
    name = models.CharField(max_length=20)
class Band(models.Model):
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Group(models.Model):
    name = models.CharField(max_length=30)
    # M2M with an explicit through model (Membership below).
    members = models.ManyToManyField(Musician, through='Membership')
    def __str__(self):
        return self.name
class Membership(models.Model):
    music = models.ForeignKey(Musician)
    group = models.ForeignKey(Group)
    role = models.CharField(max_length=15)
# Multi-table inheritance variants.
class Quartet(Group):
    pass
class ChordsMusician(Musician):
    pass
class ChordsBand(models.Model):
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')
class Invitation(models.Model):
    player = models.ForeignKey(ChordsMusician)
    band = models.ForeignKey(ChordsBand)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()
    class Meta:
        ordering = ('speed', 'load')
class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    # NOTE: the field name shadows the builtin `bool`.
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    def get_queryset(self):
        return super(OrderedObjectManager, self).get_queryset().order_by('number')
class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    bool = models.BooleanField(default=True)
    number = models.IntegerField(default=0, db_column='number_val')
    objects = OrderedObjectManager()
class CustomIdUser(models.Model):
    uuid = models.AutoField(primary_key=True)
| bsd-3-clause |
the-it/WS_THEbotIT | archive/online/2015/151022_orbis_insert_chapter.py | 1 | 1745 | # -*- coding: utf-8 -*-
__author__ = 'eso'
import sys
sys.path.append('../../')
#from tools.catscan import CatScan
import re
import requests
import pywikibot
#Skript doesn't work at the moment. It seems, that the database is lacking behind. Let's wait for a few days.
#Mazbe then all backlinks will be found.
def add_zeros(number, digits):
    """Return ``number`` as a string left-padded with zeros to ``digits`` width.

    The original branch cascade only handled numbers below 1000: anything
    with four or more digits fell through every branch and came back
    unpadded even when ``digits`` asked for a wider field. str.zfill()
    pads correctly for every magnitude and returns the number unpadded
    when it is already at least ``digits`` wide, matching the old
    behavior for the cases it did handle.
    """
    return str(number).zfill(digits)
# One-off maintenance pass over the Orbis Pictus page range: for each page,
# take a backlink from the article namespace and insert it as the chapter
# link into the Seitenstatus2 template.
site = pywikibot.Site()
for i in range(6, 308):  # full range would be 467
    page = pywikibot.Page(site, 'Seite:OrbisPictus {}.jpg'.format(add_zeros(i, 3)))
    print(page.title())
    test = page.backlinks(namespaces=0)
    # NOTE(review): this consumes and prints the FIRST backlink; the chapter
    # link below is taken from the SECOND one. If there is no backlink at
    # all, this next() raises StopIteration outside any handler.
    print(next(test))
    try:
        test2 = next(test).title()
        is_there_match = re.search('\{\{Seitenstatus2\|\[\[Johann Amos Comenius\]\]\|\[\[Orbis sensualium pictus\]\]\|Orbis sensualium pictus\|\}\}', page.text)
        if is_there_match:
            new_text = re.sub('\{\{Seitenstatus2\|\[\[Johann Amos Comenius\]\]\|\[\[Orbis sensualium pictus\]\]\|Orbis sensualium pictus\|\}\}',
                              '{{Seitenstatus2|[[Johann Amos Comenius]]|[[Orbis sensualium pictus]]|Orbis sensualium pictus|[[%s]]}}' % test2,
                              page.text)
            print('{{Seitenstatus2|[[Johann Amos Comenius]]|[[Orbis sensualium pictus]]|Orbis sensualium pictus|[[%s]]}}' % test2)
            page.text = new_text
            page.save(summary='Kapitel eingefügt', botflag=True)
    except Exception:
        # Best-effort: skip pages without a second backlink or with save
        # errors. Narrowed from a bare "except:" so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
IntelLabs/numba | numba/cuda/tests/cudapy/test_reduction.py | 6 | 2528 | import numpy as np
from numba import cuda
from numba.core.config import ENABLE_CUDASIM
from numba.cuda.testing import CUDATestCase
import unittest
# Avoid recompilation of the sum_reduce function by keeping it at global scope
# (it is shared by several TestReduction methods below).
sum_reduce = cuda.Reduce(lambda a, b: a + b)
class TestReduction(CUDATestCase):
    """Exercises cuda.Reduce / cuda.reduce with host and device arrays."""
    def _sum_reduce(self, n):
        # Sum 1..n as float64 and compare the device reduction with NumPy.
        A = (np.arange(n, dtype=np.float64) + 1)
        expect = A.sum()
        got = sum_reduce(A)
        self.assertEqual(expect, got)
    def test_sum_reduce(self):
        if ENABLE_CUDASIM:
            # Minimal test set for the simulator (which only wraps
            # functools.reduce)
            test_sizes = [ 1, 16 ]
        else:
            # Tests around the points where blocksize changes, and around larger
            # powers of two, sums of powers of two, and some "random" sizes
            test_sizes = [ 1, 15, 16, 17, 127, 128, 129, 1023, 1024,
                           1025, 1536, 1048576, 1049600, 1049728, 34567 ]
        # Avoid recompilation by keeping sum_reduce here
        for n in test_sizes:
            self._sum_reduce(n)
    def test_empty_array_host(self):
        # Reducing a zero-length host array must match NumPy's sum() == 0.
        A = (np.arange(0, dtype=np.float64) + 1)
        expect = A.sum()
        got = sum_reduce(A)
        self.assertEqual(expect, got)
    def test_empty_array_device(self):
        # Same as above but with the array already on the device.
        A = (np.arange(0, dtype=np.float64) + 1)
        dA = cuda.to_device(A)
        expect = A.sum()
        got = sum_reduce(dA)
        self.assertEqual(expect, got)
    def test_prod_reduce(self):
        # cuda.reduce (lowercase helper) with a product op and init=1.
        prod_reduce = cuda.reduce(lambda a, b: a * b)
        A = (np.arange(64, dtype=np.float64) + 1)
        expect = A.prod()
        got = prod_reduce(A, init=1)
        np.testing.assert_allclose(expect, got)
    def test_max_reduce(self):
        max_reduce = cuda.Reduce(lambda a, b: max(a, b))
        A = (np.arange(3717, dtype=np.float64) + 1)
        expect = A.max()
        got = max_reduce(A, init=0)
        self.assertEqual(expect, got)
    def test_non_identity_init(self):
        # A non-identity init value is folded into the result.
        init = 3
        A = (np.arange(10, dtype=np.float64) + 1)
        expect = A.sum() + init
        got = sum_reduce(A, init=init)
        self.assertEqual(expect, got)
    def test_result_on_device(self):
        # When res= is given, the reduction writes into that device array
        # and returns None.
        A = (np.arange(10, dtype=np.float64) + 1)
        got = cuda.to_device(np.zeros(1, dtype=np.float64))
        expect = A.sum()
        res = sum_reduce(A, res=got)
        self.assertIsNone(res)
        self.assertEqual(expect, got[0])
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
SOKP/external_chromium_org | chrome/common/extensions/docs/server2/samples_model_test.py | 32 | 1264 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_util import Server2Path
def _ReadLocalFile(filename):
  """Return the contents of |filename| from the samples test-data directory."""
  path = os.path.join(Server2Path('test_data', 'samples_data_source'), filename)
  with open(path, 'r') as f:
    return f.read()
class _FakeCache(object):
def __init__(self, obj):
self._cache = obj
def GetFromFileListing(self, _):
getter = lambda: 0
getter.Get = lambda: self._cache
return getter
class SamplesModelSourceTest(unittest.TestCase):
  """Tests FilterSamples against canned samples.json / expected.json data."""
  def setUp(self):
    # Server instance backed by an empty file system; the samples cache is
    # then replaced with canned data so nothing is actually fetched.
    server_instance = ServerInstance.ForTest(file_system=TestFileSystem({}))
    self._samples_model = server_instance.platform_bundle.GetSamplesModel(
        'apps')
    self._samples_model._samples_cache = _FakeCache(json.loads(_ReadLocalFile(
        'samples.json')))
  def testFilterSamples(self):
    # Filtering by 'bobaloo' must produce exactly the canned expectation.
    self.assertEquals(json.loads(_ReadLocalFile('expected.json')),
                      self._samples_model.FilterSamples('bobaloo'))
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
githubutilities/LeetCode | Python/binary-watch.py | 3 | 1396 | # Time: O(1)
# Space: O(1)
# A binary watch has 4 LEDs on the top which represent the hours (0-11),
# and the 6 LEDs on the bottom represent the minutes (0-59).
#
# Each LED represents a zero or one, with the least significant bit on the right.
#
# For example, the above binary watch reads "3:25".
#
# Given a non-negative integer n which represents the number of LEDs that are currently on,
# return all possible times the watch could represent.
#
# Example:
#
# Input: n = 1
# Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
# Note:
# The order of output does not matter.
# The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
class Solution(object):
    """Binary-watch solutions. NOTE: readBinaryWatch uses xrange (Python 2)."""
    def readBinaryWatch(self, num):
        """
        :type num: int
        :rtype: List[str]

        Enumerate every hour/minute pair and keep those whose combined
        number of set bits equals ``num``.
        """
        def bit_count(bits):
            # Kernighan's popcount: each `bits &= bits-1` clears the lowest
            # set bit, so the loop runs once per set bit.
            count = 0
            while bits:
                bits &= bits-1
                count += 1
            return count

        return ['%d:%02d' % (h, m)
                for h in xrange(12) for m in xrange(60)
                if bit_count(h) + bit_count(m) == num]
    def readBinaryWatch2(self, num):
        """
        :type num: int
        :rtype: List[str]

        Same enumeration, counting bits via the '1' characters of bin().
        """
        return ['{0}:{1}'.format(str(h), str(m).zfill(2)) for h in range(12) for m in range(60) if (bin(h) + bin(m)).count('1') == num]
| mit |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/pytz/exceptions.py | 657 | 1333 | '''
Custom exceptions raised by pytz.
'''
# Public API of this module; everything is part of the pytz exception
# hierarchy (InvalidTimeError is the base of the two DST-related errors).
__all__ = [
    'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
    'NonExistentTimeError',
]
class UnknownTimeZoneError(KeyError):
    '''Exception raised when pytz is passed an unknown timezone.
    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True
    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.
    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    '''
    pass
class InvalidTimeError(Exception):
    '''Base class for invalid time exceptions.'''
class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.
    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.
    See DstTzInfo.normalize() for more info
    '''
class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.
    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
| mit |
tedder/ansible-modules-core | cloud/amazon/ec2_scaling_policy.py | 53 | 6900 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
- The minimum period of time between which autoscaling actions can take place
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
'''
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_scaling_policy(connection, module):
    """Create the scaling policy, or update an existing one in place.

    Exits the module via exit_json with changed=True when a policy was
    created or modified, changed=False when the existing policy already
    matched; API errors end the task via fail_json.
    """
    sp_name = module.params.get('name')
    adjustment_type = module.params.get('adjustment_type')
    asg_name = module.params.get('asg_name')
    scaling_adjustment = module.params.get('scaling_adjustment')
    min_adjustment_step = module.params.get('min_adjustment_step')
    cooldown = module.params.get('cooldown')
    scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
    if not scalingPolicies:
        # No policy of this name yet: create it from scratch.
        sp = ScalingPolicy(
            name=sp_name,
            adjustment_type=adjustment_type,
            as_name=asg_name,
            scaling_adjustment=scaling_adjustment,
            min_adjustment_step=min_adjustment_step,
            cooldown=cooldown)
        try:
            connection.create_scaling_policy(sp)
            # Re-fetch so the reported attributes are what AWS stored.
            policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
            module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        # Policy exists: diff each attribute and push an update if needed.
        policy = scalingPolicies[0]
        changed = False
        # min_adjustment_step attribute is only relevant if the adjustment_type
        # is set to percentage change in capacity, so it is a special case
        if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
            if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
                changed = True
            # set the min adjustment step incase the user decided to change their
            # adjustment type to percentage
            setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
        # check the remaining attributes
        for attr in ('adjustment_type','scaling_adjustment','cooldown'):
            if getattr(policy, attr) != module.params.get(attr):
                changed = True
                setattr(policy, attr, module.params.get(attr))
        try:
            if changed:
                # create_scaling_policy also updates an existing policy.
                connection.create_scaling_policy(policy)
                policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
            module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
    """Delete the named scaling policy from its autoscaling group.

    Exits the module with changed=True when a policy was removed and
    changed=False when there was nothing to delete; API errors fail the
    task via fail_json.
    """
    sp_name = module.params.get('name')
    asg_name = module.params.get('asg_name')
    scaling_policies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
    if scaling_policies:
        try:
            connection.delete_policy(sp_name, asg_name)
            module.exit_json(changed=True)
        except BotoServerError as e:
            # Fail the task on API errors, matching create_scaling_policy();
            # the previous exit_json(changed=False, msg=...) reported the
            # task as successful even though the delete failed.
            module.fail_json(msg=str(e))
    else:
        module.exit_json(changed=False)
def main():
    """Module entry point: parse arguments, connect, dispatch on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name = dict(required=True, type='str'),
            adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
            asg_name = dict(required=True, type='str'),
            scaling_adjustment = dict(type='int'),
            min_adjustment_step = dict(type='int'),
            cooldown = dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    state = module.params.get('state')
    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg = str(e))
    # Both helpers terminate the module themselves via exit_json/fail_json.
    if state == 'present':
        create_scaling_policy(connection, module)
    elif state == 'absent':
        delete_scaling_policy(connection, module)
if __name__ == '__main__':
    main()
| gpl-3.0 |
mbkumar/pymatgen | pymatgen/command_line/mcsqs_caller.py | 1 | 8263 | """
Module to call mcsqs, distributed with AT-AT
https://www.brown.edu/Departments/Engineering/Labs/avdw/atat/
"""
import os
import warnings
from subprocess import Popen, TimeoutExpired
from typing import Dict, Union, List, NamedTuple, Optional
from pathlib import Path
from monty.dev import requires
from monty.os.path import which
import tempfile
from pymatgen import Structure
class Sqs(NamedTuple):
    """
    Return type for run_mcsqs.
    """
    # Best special quasirandom structure found by mcsqs.
    bestsqs: Structure
    # mcsqs objective function value, or the string "Perfect_match" when
    # the correlations match exactly.
    objective_function: Union[float, str]
    # All candidate SQS results: dicts with "structure" and
    # "objective_function" keys, one per detected mcsqs instance.
    allsqs: List
    # Absolute path of the directory the calculation ran in.
    directory: str
@requires(
    which("mcsqs") and which("str2cif"),
    "run_mcsqs requires first installing AT-AT, "
    "see https://www.brown.edu/Departments/Engineering/Labs/avdw/atat/",
)
def run_mcsqs(
    structure: Structure,
    clusters: Dict[int, float],
    scaling: Union[int, List[int]],
    search_time: float = 60,
    directory: Optional[str] = None,
    instances: Optional[int] = None,
    temperature: Union[int, float] = 1,
    wr: float = 1,
    wn: float = 1,
    wd: float = 0.5,
    tol: float = 1e-3,
) -> Sqs:
    """
    Helper function for calling mcsqs with different arguments
    Args:
        structure (Structure): Disordered pymatgen Structure object
        clusters (dict): Dictionary of cluster interactions with entries in the form
            number of atoms: cutoff in angstroms
        scaling (int or list): Scaling factor to determine supercell. Two options are possible:
            a. (preferred) Scales number of atoms, e.g., for a structure with 8 atoms,
               scaling=4 would lead to a 32 atom supercell
            b. A sequence of three scaling factors, e.g., [2, 1, 1], which
               specifies that the supercell should have dimensions 2a x b x c
    Keyword Args:
        search_time (float): Time spent looking for the ideal SQS in minutes (default: 60)
        directory (str): Directory to run mcsqs calculation and store files (default: None
            runs calculations in a temp directory)
        instances (int): Specifies the number of parallel instances of mcsqs to run
            (default: number of cpu cores detected by Python)
        temperature (int or float): Monte Carlo temperature (default: 1), "T" in atat code
        wr (int or float): Weight assigned to range of perfect correlation match in objective
            function (default = 1)
        wn (int or float): Multiplicative decrease in weight per additional point in cluster (default: 1)
        wd (int or float): Exponent of decay in weight as function of cluster diameter (default: 0.5)
        tol (int or float): Tolerance for matching correlations (default: 1e-3)
    Returns:
        Tuple of Pymatgen structure SQS of the input structure, the mcsqs objective function,
            list of all SQS structures, and the directory where calculations are run
    """
    num_atoms = len(structure)
    if structure.is_ordered:
        raise ValueError("Pick a disordered structure")
    if instances is None:
        # os.cpu_count() can return None if detection fails
        instances = os.cpu_count()
    original_directory = os.getcwd()
    if not directory:
        directory = tempfile.mkdtemp()
    # NOTE(review): the process-wide cwd is changed here and is only
    # restored on the final failure path below; on success the caller is
    # left inside `directory` -- confirm whether that is intended.
    os.chdir(directory)
    if isinstance(scaling, (int, float)):
        if scaling % 1:
            raise ValueError("Scaling should be an integer, not {}".format(scaling))
        mcsqs_find_sqs_cmd = ["mcsqs", "-n {}".format(scaling * num_atoms)]
    else:
        # Set supercell to identity (will make supercell with pymatgen)
        with open("sqscell.out", "w") as f:
            f.write("1\n1 0 0\n0 1 0\n0 0 1\n")
        structure = structure * scaling
        mcsqs_find_sqs_cmd = ["mcsqs", "-rc", "-n {}".format(num_atoms)]
    structure.to(filename="rndstr.in")
    # Generate clusters: one "-<points>=<cutoff>" flag per entry.
    mcsqs_generate_clusters_cmd = ["mcsqs"]
    for num in clusters:
        mcsqs_generate_clusters_cmd.append("-" + str(num) + "=" + str(clusters[num]))
    # Run mcsqs to find clusters
    p = Popen(mcsqs_generate_clusters_cmd)
    p.communicate()
    # Generate SQS structures
    add_ons = [
        "-T {}".format(temperature),
        "-wr {}".format(wr),
        "-wn {}".format(wn),
        "-wd {}".format(wd),
        "-tol {}".format(tol),
    ]
    mcsqs_find_sqs_processes = []
    if instances and instances > 1:
        # if multiple instances, run a range of commands using "-ip"
        for i in range(instances):
            instance_cmd = ["-ip {}".format(i + 1)]
            cmd = mcsqs_find_sqs_cmd + add_ons + instance_cmd
            p = Popen(cmd)
            mcsqs_find_sqs_processes.append(p)
    else:
        # run normal mcsqs command
        cmd = mcsqs_find_sqs_cmd + add_ons
        p = Popen(cmd)
        mcsqs_find_sqs_processes.append(p)
    try:
        # Wait for every instance; search_time is minutes, communicate()
        # wants seconds.
        for idx, p in enumerate(mcsqs_find_sqs_processes):
            p.communicate(timeout=search_time * 60)
        if instances and instances > 1:
            # "mcsqs -best" consolidates the per-instance results.
            p = Popen(["mcsqs", "-best"])
            p.communicate()
        if os.path.exists("bestsqs.out") and os.path.exists("bestcorr.out"):
            return _parse_sqs_path(".")
        raise RuntimeError("mcsqs exited before timeout reached")
    except TimeoutExpired:
        # Timed out: kill the workers and salvage whatever they produced.
        for p in mcsqs_find_sqs_processes:
            p.kill()
            p.communicate()
        # Find the best sqs structures
        if instances and instances > 1:
            if not os.path.exists("bestcorr1.out"):
                raise RuntimeError(
                    "mcsqs did not generate output files, "
                    "is search_time sufficient or are number of instances too high?"
                )
            p = Popen(["mcsqs", "-best"])
            p.communicate()
        if os.path.exists("bestsqs.out") and os.path.exists("bestcorr.out"):
            sqs = _parse_sqs_path(".")
            return sqs
        else:
            os.chdir(original_directory)
            raise TimeoutError("Cluster expansion took too long.")
def _parse_sqs_path(path) -> Sqs:
    """
    Private function to parse an mcsqs output directory.
    Args:
        path: directory to perform parsing
    Returns:
        Sqs namedtuple holding the best SQS structure of the input structure,
        the mcsqs objective function, a list of all SQS structures with their
        objective functions, and the directory where calculations were run
    """
    path = Path(path)
    # Detected instances will be 0 if mcsqs was run in series, or the number of
    # parallel instances (mcsqs writes one bestsqs<i>.out file per instance).
    detected_instances = len(list(path.glob("bestsqs*[0-9]*")))
    # Convert best SQS structure to a cif file and a pymatgen Structure.
    # str2cif runs with cwd=path, so bestsqs.cif is written into `path`.
    p = Popen("str2cif < bestsqs.out > bestsqs.cif", shell=True, cwd=path)
    p.communicate()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        bestsqs = Structure.from_file(path / "bestsqs.cif")
    # Get best SQS objective function: the value after "=" on the last line.
    with open(path / "bestcorr.out", "r") as f:
        lines = f.readlines()
    objective_function_str = lines[-1].split("=")[-1].strip()
    objective_function: Union[float, str]
    if objective_function_str != "Perfect_match":
        objective_function = float(objective_function_str)
    else:
        objective_function = "Perfect_match"
    # Get all SQS structures and objective functions (one set per instance).
    allsqs = []
    for i in range(detected_instances):
        sqs_out = "bestsqs{}.out".format(i + 1)
        sqs_cif = "bestsqs{}.cif".format(i + 1)
        corr_out = "bestcorr{}.out".format(i + 1)
        p = Popen("str2cif <" + sqs_out + ">" + sqs_cif, shell=True, cwd=path)
        p.communicate()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # BUG FIX: read the cif from `path`, not from the current working
            # directory -- the str2cif call above wrote it into `path`
            # (consistent with the bestsqs.cif handling above).
            sqs = Structure.from_file(path / sqs_cif)
        with open(path / corr_out, "r") as f:
            lines = f.readlines()
        objective_function_str = lines[-1].split("=")[-1].strip()
        obj: Union[float, str]
        if objective_function_str != "Perfect_match":
            obj = float(objective_function_str)
        else:
            obj = "Perfect_match"
        allsqs.append({"structure": sqs, "objective_function": obj})
    return Sqs(
        bestsqs=bestsqs,
        objective_function=objective_function,
        allsqs=allsqs,
        directory=str(path.resolve()),
    )
| mit |
ethereum/pyrlp | rlp/lazy.py | 1 | 5999 | from collections.abc import Iterable, Sequence
from .codec import consume_length_prefix, consume_payload
from .exceptions import DecodingError
from .atomic import Atomic
def decode_lazy(rlp, sedes=None, **sedes_kwargs):
    r"""Decode an RLP encoded object in a lazy fashion.

    If the encoded object is a bytestring, this function acts similar to
    :func:`rlp.decode`. If it is a list however, a :class:`LazyList` is
    returned instead. This object will decode the string lazily, avoiding
    both horizontal and vertical traversing as much as possible.

    The way `sedes` is applied depends on the decoded object: If it is a string
    `sedes` deserializes it as a whole; if it is a list, each element is
    deserialized individually. In both cases, `sedes_kwargs` are passed on.
    Note that, if a deserializer is used, only "horizontal" but not
    "vertical lazyness" can be preserved.

    :param rlp: the RLP string to decode
    :param sedes: an object implementing a method ``deserialize(code)`` which
                  is used as described above, or ``None`` if no
                  deserialization should be performed
    :param \*\*sedes_kwargs: additional keyword arguments that will be passed
                             to the deserializers
    :returns: either the already decoded and deserialized object (if encoded as
              a string) or an instance of :class:`rlp.LazyList`
    :raises DecodingError: if the announced length prefix does not match the
                           actual length of `rlp`
    """
    # NOTE: the docstring is a raw string; the original used "\*" escapes in a
    # plain string literal, which is an invalid escape sequence and a
    # DeprecationWarning/SyntaxWarning on modern CPython.
    item, end = consume_item_lazy(rlp, 0)
    if end != len(rlp):
        raise DecodingError('RLP length prefix announced wrong length', rlp)
    if isinstance(item, LazyList):
        # Defer deserialization: the LazyList applies `sedes` to each element
        # as it is accessed, preserving horizontal lazyness.
        item.sedes = sedes
        item.sedes_kwargs = sedes_kwargs
        return item
    elif sedes:
        # A plain bytestring is deserialized eagerly, as a whole.
        return sedes.deserialize(item, **sedes_kwargs)
    else:
        return item
def consume_item_lazy(rlp, start):
    """Read an item from an RLP string lazily.

    If the length prefix announces a string, the string is read; if it
    announces a list, a :class:`LazyList` is created instead of decoding
    the list contents.

    :param rlp: the rlp string to read from
    :param start: the position at which to start reading
    :returns: a tuple ``(item, end)`` where ``item`` is the read string or a
              :class:`LazyList` and ``end`` is the position of the first
              unprocessed byte.
    """
    prefix_end, item_type, length, payload_start = consume_length_prefix(rlp, start)
    if item_type is not bytes:
        # Anything that is not a string must be a list; wrap it lazily.
        assert item_type is list
        list_end = payload_start + length
        return LazyList(rlp, payload_start, list_end), list_end
    item, _, end = consume_payload(rlp, prefix_end, payload_start, bytes, length)
    return item, end
class LazyList(Sequence):
    r"""A RLP encoded list which decodes itself when necessary.

    Both indexing with positive indices and iterating are supported.
    Getting the length with :func:`len` is possible as well but requires full
    horizontal encoding.

    :param rlp: the rlp string in which the list is encoded
    :param start: the position of the first payload byte of the encoded list
    :param end: the position of the last payload byte of the encoded list
    :param sedes: a sedes object which deserializes each element of the list,
                  or ``None`` for no deserialization
    :param \*\*sedes_kwargs: keyword arguments which will be passed on to the
                             deserializer
    """
    def __init__(self, rlp, start, end, sedes=None, **sedes_kwargs):
        self.rlp = rlp
        self.start = start
        self.end = end
        self.index = start  # byte position of the next element not yet decoded
        self._elements = []  # elements decoded so far, in encoding order
        self._len = None  # cached element count; known only after full traversal
        self.sedes = sedes
        self.sedes_kwargs = sedes_kwargs
    def next(self):
        # Decode and return the next not-yet-decoded element, caching it.
        if self.index == self.end:
            # Fully traversed: the element count is now known.
            self._len = len(self._elements)
            raise StopIteration
        assert self.index < self.end
        item, end = consume_item_lazy(self.rlp, self.index)
        self.index = end
        if self.sedes:
            item = self.sedes.deserialize(item, **self.sedes_kwargs)
        self._elements.append(item)
        return item
    def __getitem__(self, i):
        if isinstance(i, slice):
            if i.step is not None:
                raise TypeError("Step not supported")
            start = i.start
            stop = i.stop
        else:
            start = i
            stop = i + 1
        if stop is None:
            # NOTE(review): for an open-ended slice this uses the byte offset
            # ``self.end`` as an element count; for long lists this decodes
            # more elements than exist (caught by StopIteration) and for
            # byte-dense lists it may be fine only by accident -- confirm
            # the intended semantics before relying on open-ended slices.
            stop = self.end - 1
        try:
            # Decode elements on demand until `stop` elements are available.
            while len(self._elements) < stop:
                self.next()
        except StopIteration:
            assert self.index == self.end
            raise IndexError('Index %s out of range' % i)
        if isinstance(i, slice):
            return self._elements[start:stop]
        else:
            return self._elements[start]
    def __len__(self):
        # NOTE(review): the truthiness test also re-traverses when the cached
        # length is 0 (``not 0`` is True); harmless but wasteful -- an
        # ``is None`` check would avoid it.
        if not self._len:
            try:
                # Force full horizontal decoding to count the elements.
                while True:
                    self.next()
            except StopIteration:
                self._len = len(self._elements)
        return self._len
def peek(rlp, index, sedes=None):
    """Get a specific element from an rlp encoded nested list.

    This function uses :func:`rlp.decode_lazy` and, thus, decodes only the
    necessary parts of the string.

    Usage example::
        >>> import rlp
        >>> rlpdata = rlp.encode([1, 2, [3, [4, 5]]])
        >>> rlp.peek(rlpdata, 0, rlp.sedes.big_endian_int)
        1
        >>> rlp.peek(rlpdata, [2, 0], rlp.sedes.big_endian_int)
        3

    :param rlp: the rlp string
    :param index: the index of the element to peek at (can be a list for
                  nested data)
    :param sedes: a sedes used to deserialize the peeked at object, or `None`
                  if no deserialization should be performed
    :raises: :exc:`IndexError` if `index` is invalid (out of range or too many
             levels)
    """
    decoded = decode_lazy(rlp)
    # Normalize a scalar index into a one-element path.
    path = index if isinstance(index, Iterable) else [index]
    for level in path:
        if isinstance(decoded, Atomic):
            raise IndexError('Too many indices given')
        decoded = decoded[level]
    return sedes.deserialize(decoded) if sedes else decoded
| mit |
rdeheele/odoo | addons/hw_posbox_upgrade/__openerp__.py | 313 | 1696 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest: a bare dict literal read by the module loader;
# this file is evaluated, never imported as a regular Python module.
{
    'name': 'PosBox Software Upgrader',
    'version': '1.0',
    'category': 'Hardware Drivers',
    'website': 'https://www.odoo.com/page/point-of-sale',
    'sequence': 6,
    'summary': 'Allows to remotely upgrade the PosBox software',
    'description': """
PosBox Software Upgrader
========================
This module allows to remotely upgrade the PosBox software to a
new version. This module is specific to the PosBox setup and environment
and should not be installed on regular openerp servers.
""",
    'author': 'OpenERP SA',
    'depends': ['hw_proxy'],
    'test': [
    ],
    # Deliberately not installable from the apps list; PosBox-specific.
    'installable': False,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Idematica/django-oscar | sites/demo/urls.py | 3 | 1030 | from django.conf.urls import patterns, include
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from stores.app import application as stores_app
from stores.dashboard.app import application as dashboard_app
from apps.app import application
from datacash.dashboard.app import application as datacash_app
# These need to be imported into this namespace
from oscar.views import handler500, handler404, handler403
admin.autodiscover()
# URL routing for the demo site: admin, extensions, then Oscar's catch-all.
urlpatterns = patterns('',
    (r'^admin/', include(admin.site.urls)),
    # Stores extension
    (r'^stores/', include(stores_app.urls)),
    (r'^dashboard/stores/', include(dashboard_app.urls)),
    # PayPal extension
    (r'^checkout/paypal/', include('paypal.express.urls')),
    # Datacash extension
    (r'^dashboard/datacash/', include(datacash_app.urls)),
    # Catch-all: the main Oscar application handles everything else.
    (r'', include(application.urls)),
)
if settings.DEBUG:
    # Serve user-uploaded media directly; development convenience only
    # (static() is a no-op when DEBUG is False).
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
| bsd-3-clause |
ahorincar/bloodhound_solr_plugin | bhsolr/search_resources/wiki_search.py | 1 | 1255 | # -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from trac.wiki import WikiSystem, WikiPage
from bhsearch.search_resources.wiki_search import WikiIndexer
from bhsearch.search_resources.base import BaseIndexer
class WikiSearchModel(BaseIndexer):
    """Exposes Trac wiki pages as indexable documents."""

    def get_entries_for_index(self):
        """Yield one index document per wiki page in the environment."""
        for name in WikiSystem(self.env).get_pages():
            yield WikiIndexer(self.env).build_doc(WikiPage(self.env, name))
| apache-2.0 |
angadpc/Alexa-Project- | requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober bundling all multi-byte character set probers."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Instantiate one prober per supported multi-byte encoding,
        # in the same priority order as before.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [klass() for klass in prober_classes]
        self.reset()
| mit |
raccoongang/edx-platform | cms/celery.py | 3 | 1372 | """
Import celery, load its settings from the django settings
and auto discover tasks in all installed django apps.
Taken from: http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
"""
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
from openedx.core.lib.celery.routers import AlternateEnvironmentRouter
# set the default Django settings module for the 'celery' program.
# NOTE(review): 'proj.settings' / Celery('proj') match the Celery
# first-steps-with-django example this file cites; confirm these placeholders
# are intentional for the cms service rather than leftover boilerplate.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
APP = Celery('proj')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
APP.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app.
APP.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
class Router(AlternateEnvironmentRouter):
    """
    An implementation of AlternateEnvironmentRouter, for routing tasks to non-cms queues.
    """
    @property
    def alternate_env_tasks(self):
        """
        Defines alternate environment tasks, as a dict of form { task_name: alternate_queue }
        """
        # All of these tasks run in the LMS environment rather than the CMS.
        block_structure = 'openedx.core.djangoapps.content.block_structure.tasks.'
        lms_tasks = (
            block_structure + 'update_course_in_cache',
            block_structure + 'update_course_in_cache_v2',
            'lms.djangoapps.grades.tasks.compute_all_grades_for_course',
        )
        return {task_name: 'lms' for task_name in lms_tasks}
| agpl-3.0 |
GNS3/gns3-server | gns3server/schemas/custom_adapters.py | 1 | 1672 | #!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# JSON Schema fragment describing a list of per-adapter overrides for a node.
# Each entry must name an adapter_number and may override the port name,
# adapter type, and MAC address of that adapter.
CUSTOM_ADAPTERS_ARRAY_SCHEMA = {
    "type": "array",
    "default": [],
    "items": {
        "type": "object",
        "description": "Custom properties",
        "properties": {
            "adapter_number": {
                "type": "integer",
                "description": "Adapter number"
            },
            "port_name": {
                "type": "string",
                "description": "Custom port name",
                "minLength": 1,
            },
            "adapter_type": {
                "type": "string",
                "description": "Custom adapter type",
                "minLength": 1,
            },
            "mac_address": {
                "description": "Custom MAC address",
                "type": "string",
                "minLength": 1,
                # Colon-separated form only, e.g. "00:11:22:33:44:55".
                "pattern": "^([0-9a-fA-F]{2}[:]){5}([0-9a-fA-F]{2})$"
            },
        },
        "additionalProperties": False,
        "required": ["adapter_number"]
    },
}
| gpl-3.0 |
strint/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/estimator_utils_test.py | 9 | 6856 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers import feature_column
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.dataframe import estimator_utils
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def setup_test_df():
  """Create a dataframe populated with some test columns."""
  df = learn.DataFrame()
  # Each column is a single mock series run through a fresh two-output
  # transform; the third field selects which of the two outputs to keep.
  column_specs = [("a", "Tensor a", "out1"),
                  ("b", "Tensor b", "out2"),
                  ("c", "Tensor c", "out1")]
  for column_name, tensor_name, output_name in column_specs:
    source = mocks.MockSeries("foobar",
                              mocks.MockTensor(tensor_name, dtypes.int32))
    df[column_name] = learn.TransformedSeries(
        [source], mocks.MockTwoOutputTransform("iue", "eui", "snt"),
        output_name)
  return df
def setup_test_df_3layer():
  """Create a dataframe populated with some test columns."""
  df = learn.DataFrame()
  # Layer 1: four base series, alternating dense and sparse mock tensors.
  base_specs = [("a", "a_series", mocks.MockTensor, "Tensor a"),
                ("b", "b_series", mocks.MockSparseTensor, "SparseTensor b"),
                ("c", "c_series", mocks.MockTensor, "Tensor c"),
                ("d", "d_series", mocks.MockSparseTensor, "SparseTensor d")]
  for key, series_name, tensor_cls, tensor_name in base_specs:
    df[key] = mocks.MockSeries(series_name,
                               tensor_cls(tensor_name, dtypes.int32))
  # Layers 2 and 3: derived series, each transforming a pair of columns.
  derived_specs = [("e", "a", "b", "out1"),
                   ("f", "c", "d", "out2"),
                   ("g", "e", "f", "out1")]
  for key, left, right, output_name in derived_specs:
    df[key] = learn.TransformedSeries(
        [df[left], df[right]],
        mocks.Mock2x2Transform("iue", "eui", "snt"), output_name)
  return df
class EstimatorUtilsTest(test.TestCase):
  """Test of estimator utils."""
  def test_to_feature_columns_and_input_fn(self):
    # End-to-end: derive feature columns and an input_fn from a 3-layer df,
    # selecting base columns "a"/"b", derived column "f", and label "g".
    df = setup_test_df_3layer()
    feature_columns, input_fn = (
        estimator_utils.to_feature_columns_and_input_fn(
            df,
            base_input_keys_with_defaults={"a": 1,
                                           "b": 2,
                                           "c": 3,
                                           "d": 4},
            label_keys=["g"],
            feature_keys=["a", "b", "f"]))
    # Expected columns mirror how each requested key is defined in the df.
    expected_feature_column_a = feature_column.DataFrameColumn(
        "a",
        learn.PredefinedSeries(
            "a",
            parsing_ops.FixedLenFeature(tensor_shape.unknown_shape(),
                                        dtypes.int32, 1)))
    expected_feature_column_b = feature_column.DataFrameColumn(
        "b",
        learn.PredefinedSeries("b", parsing_ops.VarLenFeature(dtypes.int32)))
    expected_feature_column_f = feature_column.DataFrameColumn(
        "f",
        learn.TransformedSeries([
            learn.PredefinedSeries("c",
                                   parsing_ops.FixedLenFeature(
                                       tensor_shape.unknown_shape(),
                                       dtypes.int32, 3)),
            learn.PredefinedSeries("d", parsing_ops.VarLenFeature(dtypes.int32))
        ], mocks.Mock2x2Transform("iue", "eui", "snt"), "out2"))
    expected_feature_columns = [
        expected_feature_column_a, expected_feature_column_b,
        expected_feature_column_f
    ]
    self.assertEqual(sorted(expected_feature_columns), sorted(feature_columns))
    # The input_fn must surface all four base inputs and the "g" label.
    base_features, labels = input_fn()
    expected_base_features = {
        "a": mocks.MockTensor("Tensor a", dtypes.int32),
        "b": mocks.MockSparseTensor("SparseTensor b", dtypes.int32),
        "c": mocks.MockTensor("Tensor c", dtypes.int32),
        "d": mocks.MockSparseTensor("SparseTensor d", dtypes.int32)
    }
    self.assertEqual(expected_base_features, base_features)
    expected_labels = mocks.MockTensor("Out iue", dtypes.int32)
    self.assertEqual(expected_labels, labels)
    self.assertEqual(3, len(feature_columns))
  def test_to_feature_columns_and_input_fn_no_labels(self):
    # Same setup but with no label_keys: labels must come back empty.
    df = setup_test_df_3layer()
    feature_columns, input_fn = (
        estimator_utils.to_feature_columns_and_input_fn(
            df,
            base_input_keys_with_defaults={"a": 1,
                                           "b": 2,
                                           "c": 3,
                                           "d": 4},
            feature_keys=["a", "b", "f"]))
    base_features, labels = input_fn()
    expected_base_features = {
        "a": mocks.MockTensor("Tensor a", dtypes.int32),
        "b": mocks.MockSparseTensor("SparseTensor b", dtypes.int32),
        "c": mocks.MockTensor("Tensor c", dtypes.int32),
        "d": mocks.MockSparseTensor("SparseTensor d", dtypes.int32)
    }
    self.assertEqual(expected_base_features, base_features)
    expected_labels = {}
    self.assertEqual(expected_labels, labels)
    self.assertEqual(3, len(feature_columns))
  def test_to_estimator_not_disjoint(self):
    # Overlapping label_keys and feature_keys ("f" in both) must raise.
    df = setup_test_df_3layer()
    # pylint: disable=unused-variable
    def get_not_disjoint():
      feature_columns, input_fn = (
          estimator_utils.to_feature_columns_and_input_fn(
              df,
              base_input_keys_with_defaults={"a": 1,
                                             "b": 2,
                                             "c": 3,
                                             "d": 4},
              label_keys=["f"],
              feature_keys=["a", "b", "f"]))
    self.assertRaises(ValueError, get_not_disjoint)
if __name__ == "__main__":
  test.main()
| apache-2.0 |
buqing2009/MissionPlanner | Lib/site-packages/numpy/doc/misc.py | 59 | 5765 | """
=============
Miscellaneous
=============
IEEE 754 Floating Point Special Values:
-----------------------------------------------
Special values defined in numpy: nan, inf,
NaNs can be used as a poor-man's mask (if you don't care what the
original value was)
Note: cannot use equality to test NaNs. E.g.: ::
>>> myarr = np.array([1., 0., np.nan, 3.])
>>> np.where(myarr == np.nan)
>>> np.nan == np.nan # is always False! Use special numpy functions instead.
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
>>> myarr
array([ 1., 0., NaN, 3.])
>>> myarr[np.isnan(myarr)] = 0. # use this instead to find and replace NaNs
>>> myarr
array([ 1., 0., 0., 3.])
Other related special value functions: ::
isinf(): True if value is inf
isfinite(): True if not nan or inf
nan_to_num(): Map nan to 0, inf to max float, -inf to min float
The following corresponds to the usual functions except that nans are excluded
from the results: ::
nansum()
nanmax()
nanmin()
nanargmax()
nanargmin()
>>> x = np.arange(10.)
>>> x[3] = np.nan
>>> x.sum()
nan
>>> np.nansum(x)
42.0
How numpy handles numerical exceptions
Default is to "warn"
But this can be changed, and it can be set individually for different kinds
of exceptions. The different behaviors are: ::
'ignore' : ignore completely
'warn' : print a warning (once only)
'raise' : raise an exception
'call' : call a user-supplied function (set using seterrcall())
These behaviors can be set for all kinds of errors or specific ones: ::
all: apply to all numeric exceptions
invalid: when NaNs are generated
divide: divide by zero (for integers as well!)
overflow: floating point overflows
underflow: floating point underflows
Note that integer divide-by-zero is handled by the same machinery.
These behaviors are set on a per-thread basis.
Examples:
------------
::
>>> oldsettings = np.seterr(all='warn')
>>> np.zeros(5,dtype=np.float32)/0.
invalid value encountered in divide
>>> j = np.seterr(under='ignore')
>>> np.array([1.e-100])**10
>>> j = np.seterr(invalid='raise')
>>> np.sqrt(np.array([-1.]))
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
... print "saw stupid error!"
>>> np.seterrcall(errorhandler)
<function err_handler at 0x...>
>>> j = np.seterr(all='call')
>>> np.zeros(5, dtype=np.int32)/0
FloatingPointError: invalid value encountered in divide
saw stupid error!
>>> j = np.seterr(**oldsettings) # restore previous
... # error-handling settings
Interfacing to C:
-----------------
Only a survey of the choices. Little detail on how each works.
1) Bare metal, wrap your own C-code manually.
- Plusses:
- Efficient
- No dependencies on other tools
- Minuses:
- Lots of learning overhead:
- need to learn basics of Python C API
- need to learn basics of numpy C API
- need to learn how to handle reference counting and love it.
- Reference counting often difficult to get right.
- getting it wrong leads to memory leaks, and worse, segfaults
- API will change for Python 3.0!
2) pyrex
- Plusses:
- avoid learning C API's
- no dealing with reference counting
 - can code in pseudo python and generate C code
- can also interface to existing C code
- should shield you from changes to Python C api
- become pretty popular within Python community
- Minuses:
- Can write code in non-standard form which may become obsolete
- Not as flexible as manual wrapping
- Maintainers not easily adaptable to new features
Thus:
3) cython - fork of pyrex to allow needed features for SAGE
- being considered as the standard scipy/numpy wrapping tool
- fast indexing support for arrays
4) ctypes
- Plusses:
- part of Python standard library
- good for interfacing to existing sharable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
attribute: ::
a.ctypes.data a.ctypes.get_strides
a.ctypes.data_as a.ctypes.shape
a.ctypes.get_as_parameter a.ctypes.shape_as
a.ctypes.get_data a.ctypes.strides
a.ctypes.get_shape a.ctypes.strides_as
- Minuses:
- can't use for writing code to be turned into C extensions, only a wrapper
tool.
5) SWIG (automatic wrapper generator)
- Plusses:
- around a long time
- multiple scripting language support
- C++ support
- Good for wrapping large (many functions) existing C libraries
- Minuses:
- generates lots of code between Python and the C code
- can cause performance problems that are nearly impossible to optimize
out
- interface files can be hard to write
- doesn't necessarily avoid reference counting issues or needing to know
API's
7) Weave
- Plusses:
- Phenomenal tool
- can turn many numpy expressions into C code
- dynamic compiling and loading of generated C code
- can embed pure C code in Python module and have weave extract, generate
interfaces and compile, etc.
- Minuses:
- Future uncertain--lacks a champion
8) Psyco
- Plusses:
- Turns pure python into efficient machine code through jit-like
optimizations
- very fast when it optimizes well
- Minuses:
- Only on intel (windows?)
- Doesn't do much for numpy?
Interfacing to Fortran:
-----------------------
Fortran: Clear choice is f2py. (Pyfort is an older alternative, but not
supported any longer)
Interfacing to C++:
-------------------
1) CXX
2) Boost.python
3) SWIG
4) Sage has used cython to wrap C++ (not pretty, but it can be done)
5) SIP (used mainly in PyQT)
"""
| gpl-3.0 |
BeegorMif/HTPC-Manager | lib/concurrent/futures/_compat.py | 179 | 4645 | from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
    """Returns a new subclass of tuple with named fields.
    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__ # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22) # instantiate with positional args or keywords
    >>> p[0] + p[1] # indexable like a plain tuple
    33
    >>> x, y = p # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y # fields also accessible by name
    33
    >>> d = p._asdict() # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d) # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """
    # Accept both str and (on Python 2) unicode specs; `basestring` does not
    # exist on Python 3, so fall back to `str` there.
    try:
        string_types = basestring
    except NameError:
        string_types = str
    # Parse and validate the field names. Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    if isinstance(field_names, string_types):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)
    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        # BUG FIX: the property lines must be indented to match the rest of
        # the generated class body (8 spaces), otherwise exec() raises an
        # IndentationError for any namedtuple with at least one field.
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)
    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError:
        e = _sys.exc_info()[1]
        # str(e) works on both Python 2 and 3; e.message was Python-2-only.
        raise SyntaxError(str(e) + ':\n' + template)
    result = namespace[typename]
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
    return result
| gpl-3.0 |
SoreGums/bitcoinxt | qa/rpc-tests/getchaintips.py | 140 | 2130 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the getchaintips API. We introduce a network split, work
# on chains of different lengths, and join the network together again.
# This gives us two tips, verify that it works.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest (BitcoinTestFramework):
    """Exercise the getchaintips RPC across a network split and rejoin."""
    def run_test (self):
        BitcoinTestFramework.run_test (self)
        # Initially every node sees a single active tip at height 200.
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 1)
        assert_equal (tips[0]['branchlen'], 0)
        assert_equal (tips[0]['height'], 200)
        assert_equal (tips[0]['status'], 'active')
        # Split the network and build two chains of different lengths.
        self.split_network ()
        self.nodes[0].generate(10);
        self.nodes[2].generate(20);
        self.sync_all ()
        # Node 1 (short half) sees only its own 210-high tip.
        tips = self.nodes[1].getchaintips ()
        assert_equal (len (tips), 1)
        shortTip = tips[0]
        assert_equal (shortTip['branchlen'], 0)
        assert_equal (shortTip['height'], 210)
        assert_equal (tips[0]['status'], 'active')
        # Node 3 (long half) sees only its own 220-high tip.
        tips = self.nodes[3].getchaintips ()
        assert_equal (len (tips), 1)
        longTip = tips[0]
        assert_equal (longTip['branchlen'], 0)
        assert_equal (longTip['height'], 220)
        assert_equal (tips[0]['status'], 'active')
        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network ()
        tips = self.nodes[0].getchaintips ()
        assert_equal (len (tips), 2)
        assert_equal (tips[0], longTip)
        assert_equal (tips[1]['branchlen'], 10)
        assert_equal (tips[1]['status'], 'valid-fork')
        # Normalize the fork tip's fields so it compares equal to the tip
        # observed before the join (same block, different status now).
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal (tips[1], shortTip)
if __name__ == '__main__':
    GetChainTipsTest ().main ()
| mit |
karllessard/tensorflow | tensorflow/python/framework/dtypes_test.py | 8 | 15771 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python import _dtypes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
  """Return True unless `datatype_enum` names a non-numeric (or invalid) dtype."""
  non_numeric = frozenset((
      types_pb2.DT_VARIANT,
      types_pb2.DT_VARIANT_REF,
      types_pb2.DT_INVALID,
      types_pb2.DT_RESOURCE,
      types_pb2.DT_RESOURCE_REF,
  ))
  return datatype_enum not in non_numeric
class TypesTest(test_util.TensorFlowTestCase):
  """Tests for `tf.DType`: construction, conversions, predicates, min/max."""

  def testAllTypesConstructible(self):
    # Every proto enum value except DT_INVALID can construct a DType directly.
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      self.assertEqual(datatype_enum,
                       dtypes.DType(datatype_enum).as_datatype_enum)

  def testAllTypesConvertibleToDType(self):
    # as_dtype() round-trips every valid enum back to the same enum.
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dt = dtypes.as_dtype(datatype_enum)
      self.assertEqual(datatype_enum, dt.as_datatype_enum)

  def testAllTypesConvertibleToNumpyDtype(self):
    # Each numeric dtype has a numpy counterpart usable to allocate arrays.
    for datatype_enum in types_pb2.DataType.values():
      if not _is_numeric_dtype_enum(datatype_enum):
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      numpy_dtype = dtype.as_numpy_dtype
      _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
      if dtype.base_dtype != dtypes.bfloat16:
        # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
        self.assertEqual(
            dtypes.as_dtype(datatype_enum).base_dtype,
            dtypes.as_dtype(numpy_dtype))

  def testAllPybind11DTypeConvertibleToDType(self):
    # The pybind11 _dtypes.DType compares equal to the Python-level DType.
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dtype = _dtypes.DType(datatype_enum)
      self.assertEqual(dtypes.as_dtype(datatype_enum), dtype)

  def testInvalid(self):
    # DT_INVALID is rejected by both the constructor and as_dtype().
    with self.assertRaises(TypeError):
      dtypes.DType(types_pb2.DT_INVALID)
    with self.assertRaises(TypeError):
      dtypes.as_dtype(types_pb2.DT_INVALID)

  def testNumpyConversion(self):
    # numpy dtypes, object arrays, and objects exposing a `dtype` attribute
    # all convert; structured dtypes are rejected.
    self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
    self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
    self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
    self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
    self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
    self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
    self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
    self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
    self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
    self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
    self.assertIs(dtypes.string, dtypes.as_dtype(np.object_))
    self.assertIs(dtypes.string,
                  dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
    self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool_))
    with self.assertRaises(TypeError):
      dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))
    class AnObject(object):
      dtype = "f4"
    self.assertIs(dtypes.float32, dtypes.as_dtype(AnObject))
    class AnotherObject(object):
      dtype = np.dtype(np.complex64)
    self.assertIs(dtypes.complex64, dtypes.as_dtype(AnotherObject))

  def testRealDtype(self):
    # real_dtype is the identity for real types, the component type for
    # complex types.
    for dtype in [
        dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
        dtypes.int16, dtypes.int32, dtypes.int64
    ]:
      self.assertIs(dtype.real_dtype, dtype)
    self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
    self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)

  def testStringConversion(self):
    # Every canonical name (including "_ref" variants) maps to the interned
    # DType singleton; unknown names raise TypeError.
    self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
    self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
    self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
    self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
    self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
    self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
    self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
    self.assertIs(dtypes.string, dtypes.as_dtype("string"))
    self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
    self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
    self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
    self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
    self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
    self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
    self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
    self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
    self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
    self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
    self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
    self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
    self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
    self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
    self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
    self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
    self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
    self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
    self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
    self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
    self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
    self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
    self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
    with self.assertRaises(TypeError):
      dtypes.as_dtype("not_a_type")

  def testDTypesHaveUniqueNames(self):
    # No two distinct dtypes may share a name (names are the pickle key).
    dtypez = []
    names = set()
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      dtypez.append(dtype)
      names.add(dtype.name)
    self.assertEqual(len(dtypez), len(names))

  def testIsInteger(self):
    # Quantized integer types deliberately report is_integer == False.
    self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
    self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
    self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
    self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
    self.assertEqual(dtypes.as_dtype("float").is_integer, False)
    self.assertEqual(dtypes.as_dtype("double").is_integer, False)
    self.assertEqual(dtypes.as_dtype("string").is_integer, False)
    self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_integer, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_integer, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_integer, False)

  def testIsFloating(self):
    # bfloat16 counts as floating; complex types do not.
    self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
    self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
    self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
    self.assertEqual(dtypes.as_dtype("string").is_floating, False)
    self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_floating, True)
    self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)

  def testIsComplex(self):
    # Only complex64/complex128 report is_complex.
    self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
    self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
    self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
    self.assertEqual(dtypes.as_dtype("string").is_complex, False)
    self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_complex, False)

  def testIsUnsigned(self):
    # Quantized unsigned types (quint8/quint16) still report False here.
    self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
    self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
    self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)

  def testMinMax(self):
    # make sure min/max evaluates for all data types that have min/max
    for datatype_enum in types_pb2.DataType.values():
      if not _is_numeric_dtype_enum(datatype_enum):
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      numpy_dtype = dtype.as_numpy_dtype
      # ignore types for which there are no minimum/maximum (or we cannot
      # compute it, such as for the q* types)
      if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
          dtype.base_dtype == dtypes.string or
          dtype.base_dtype == dtypes.complex64 or
          dtype.base_dtype == dtypes.complex128):
        continue
      print("%s: %s - %s" % (dtype, dtype.min, dtype.max))
      # check some values that are known
      if numpy_dtype == np.bool_:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 1)
      if numpy_dtype == np.int8:
        self.assertEqual(dtype.min, -128)
        self.assertEqual(dtype.max, 127)
      if numpy_dtype == np.int16:
        self.assertEqual(dtype.min, -32768)
        self.assertEqual(dtype.max, 32767)
      if numpy_dtype == np.int32:
        self.assertEqual(dtype.min, -2147483648)
        self.assertEqual(dtype.max, 2147483647)
      if numpy_dtype == np.int64:
        self.assertEqual(dtype.min, -9223372036854775808)
        self.assertEqual(dtype.max, 9223372036854775807)
      if numpy_dtype == np.uint8:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 255)
      if numpy_dtype == np.uint16:
        if dtype == dtypes.uint16:
          self.assertEqual(dtype.min, 0)
          self.assertEqual(dtype.max, 65535)
        elif dtype == dtypes.bfloat16:
          # NOTE(review): 4294967295 is the uint32 max, which looks wrong for
          # bfloat16 (its true max is ~3.39e38, checked further below via
          # float.fromhex). This branch only fires if bfloat16's numpy dtype
          # is np.uint16; likely dead code — confirm before relying on it.
          self.assertEqual(dtype.min, 0)
          self.assertEqual(dtype.max, 4294967295)
      if numpy_dtype == np.uint32:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 4294967295)
      if numpy_dtype == np.uint64:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 18446744073709551615)
      if numpy_dtype in (np.float16, np.float32, np.float64):
        self.assertEqual(dtype.min, np.finfo(numpy_dtype).min)
        self.assertEqual(dtype.max, np.finfo(numpy_dtype).max)
      if numpy_dtype == dtypes.bfloat16.as_numpy_dtype:
        self.assertEqual(dtype.min, float.fromhex("-0x1.FEp127"))
        self.assertEqual(dtype.max, float.fromhex("0x1.FEp127"))

  def testRepr(self):
    # Skipped pending internal bug b/142725777; kept for reference.
    self.skipTest("b/142725777")
    for enum, name in dtypes._TYPE_TO_STRING.items():
      if enum > 100:
        continue
      dtype = dtypes.DType(enum)
      self.assertEqual(repr(dtype), "tf." + name)
      import tensorflow as tf
      dtype2 = eval(repr(dtype))
      self.assertEqual(type(dtype2), dtypes.DType)
      self.assertEqual(dtype, dtype2)

  def testEqWithNonTFTypes(self):
    # DType equality against unrelated Python values is False, not an error.
    self.assertNotEqual(dtypes.int32, int)
    self.assertNotEqual(dtypes.float64, 2.1)

  def testPythonLongConversion(self):
    # A Python int too large for int32 converts via numpy to int64.
    self.assertIs(dtypes.int64, dtypes.as_dtype(np.array(2**32).dtype))

  def testPythonTypesConversion(self):
    self.assertIs(dtypes.float32, dtypes.as_dtype(float))
    self.assertIs(dtypes.bool, dtypes.as_dtype(bool))

  def testReduce(self):
    # DTypes pickle by name and unpickle to the interned singleton.
    for enum in dtypes._TYPE_TO_STRING:
      dtype = dtypes.DType(enum)
      ctor, args = dtype.__reduce__()
      self.assertEqual(ctor, dtypes.as_dtype)
      self.assertEqual(args, (dtype.name,))
      reconstructed = ctor(*args)
      self.assertEqual(reconstructed, dtype)

  def testAsDtypeInvalidArgument(self):
    with self.assertRaises(TypeError):
      dtypes.as_dtype((dtypes.int32, dtypes.float32))

  def testAsDtypeReturnsInternedVersion(self):
    # Even a freshly constructed DType resolves to the shared singleton.
    dt = dtypes.DType(types_pb2.DT_VARIANT)
    self.assertIs(dtypes.as_dtype(dt), dtypes.variant)
if __name__ == "__main__":
  # Run all TypesTest cases when this file is executed directly.
  googletest.main()
| apache-2.0 |
yongtang/tensorflow | tensorflow/lite/testing/op_tests/unfused_gru.py | 17 | 2410 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unfused_gru."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unfused_gru_tests(options):
  """Generate TFLite conversion test cases for the unfused (unrolled) GRU op."""
  # One parameter dict: the zip-test machinery expands the cross product.
  test_parameters = [{
      "units": [2, 5],
      "batch_size": [1, 2],
      "time": [3],
  }]

  def build_graph(parameters):
    """Build a bidirectional GRU graph; returns (placeholders, outputs)."""
    step_shape = [parameters["batch_size"], parameters["units"]]
    placeholders = []
    for _ in range(parameters["time"]):
      placeholders.append(tf.compat.v1.placeholder(tf.float32, step_shape))
    fw_cell = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
    bw_cell = tf.compat.v1.nn.rnn_cell.GRUCell(parameters["units"])
    outputs, _, _ = tf.compat.v1.nn.static_bidirectional_rnn(
        fw_cell, bw_cell, placeholders, dtype=tf.float32)
    return placeholders, outputs

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random per-timestep tensors and return (inputs, outputs)."""
    step_shape = [parameters["batch_size"], parameters["units"]]
    input_values = [
        create_tensor_data(tf.float32, step_shape)
        for _ in range(parameters["time"])
    ]
    # GRU cells carry trainable variables; initialize them before running.
    sess.run(tf.compat.v1.global_variables_initializer())
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
| apache-2.0 |
xmwu/pynt | docs/conf.py | 1 | 8263 | # -*- coding: utf-8 -*-
#
# PyNT documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 20 00:37:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Sphinx executes this module at build time; every top-level assignment below
# is a configuration value (see the Sphinx build-configuration reference).
import sys
import os
# Make the project root importable so sphinx.ext.autodoc can find the package.
sys.path.insert(0, os.path.abspath('..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyNT'
copyright = u'2016, Sean Wu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE: 'default' is the classic built-in theme shipped with Sphinx.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyNTdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'PyNT.tex', u'PyNT Documentation',
   u'Sean Wu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pynt', u'PyNT Documentation',
     [u'Sean Wu'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'PyNT', u'PyNT Documentation',
   u'Sean Wu', 'PyNT', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
evansd/django | tests/utils_tests/test_archive.py | 91 | 3007 | import os
import shutil
import stat
import sys
import tempfile
import unittest
from django.utils.archive import Archive, extract
TEST_DIR = os.path.join(os.path.dirname(__file__), 'archives')
class ArchiveTester:
    """Mixin that exercises django.utils.archive extraction for one format.

    Concrete subclasses set ``archive`` to a fixture file name located in
    ``TEST_DIR`` and also inherit from ``unittest.TestCase``.
    """
    archive = None

    def setUp(self):
        """
        Create temporary directory for testing extraction.
        """
        self.old_cwd = os.getcwd()
        self.tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tmpdir)
        self.archive_path = os.path.join(TEST_DIR, self.archive)
        self.archive_lead_path = os.path.join(TEST_DIR, "leadpath_%s" % self.archive)
        # Always start off in TEST_DIR.
        os.chdir(TEST_DIR)

    def tearDown(self):
        # Undo the chdir() from setUp() so later tests see a sane cwd.
        os.chdir(self.old_cwd)

    def test_extract_method(self):
        # Archive is a context manager; extract into an explicit directory.
        with Archive(self.archive) as archive:
            archive.extract(self.tmpdir)
        self.check_files(self.tmpdir)

    def test_extract_method_no_to_path(self):
        # With no to_path, extraction targets the current working directory.
        os.chdir(self.tmpdir)
        with Archive(self.archive_path) as archive:
            archive.extract()
        self.check_files(self.tmpdir)

    def test_extract_function(self):
        extract(self.archive_path, self.tmpdir)
        self.check_files(self.tmpdir)

    @unittest.skipIf(sys.platform == 'win32', 'Python on Windows has a limited os.chmod().')
    def test_extract_file_permissions(self):
        """Archive.extract() preserves file permissions."""
        extract(self.archive_path, self.tmpdir)
        filepath = os.path.join(self.tmpdir, 'executable')
        # The file has executable permission.
        self.assertTrue(os.stat(filepath).st_mode & stat.S_IXOTH)
        filepath = os.path.join(self.tmpdir, 'no_permissions')
        # The file is readable even though it doesn't have permission data in
        # the archive.
        self.assertTrue(os.stat(filepath).st_mode & stat.S_IROTH)

    def test_extract_function_with_leadpath(self):
        # "leadpath_" fixtures wrap all members in one leading directory,
        # which extraction should strip.
        extract(self.archive_lead_path, self.tmpdir)
        self.check_files(self.tmpdir)

    def test_extract_function_no_to_path(self):
        os.chdir(self.tmpdir)
        extract(self.archive_path)
        self.check_files(self.tmpdir)

    def check_files(self, tmpdir):
        """Assert that every expected archive member exists under ``tmpdir``.

        Bug fix: this method previously ignored its ``tmpdir`` argument and
        always inspected ``self.tmpdir``. It now checks the directory it is
        given; all existing callers pass ``self.tmpdir``, so their behavior
        is unchanged.
        """
        for parts in (
            ('1',),
            ('2',),
            ('foo', '1'),
            ('foo', '2'),
            ('foo', 'bar', '1'),
            ('foo', 'bar', '2'),
        ):
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, *parts)))
class TestZip(ArchiveTester, unittest.TestCase):
    # Runs the ArchiveTester suite against a ZIP fixture.
    archive = 'foobar.zip'
class TestTar(ArchiveTester, unittest.TestCase):
    # Runs the ArchiveTester suite against an uncompressed tar fixture.
    archive = 'foobar.tar'
class TestGzipTar(ArchiveTester, unittest.TestCase):
    # Runs the ArchiveTester suite against a gzip-compressed tar fixture.
    archive = 'foobar.tar.gz'
class TestBzip2Tar(ArchiveTester, unittest.TestCase):
    # Runs the ArchiveTester suite against a bzip2-compressed tar fixture.
    archive = 'foobar.tar.bz2'
| bsd-3-clause |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/test/test_statistics.py | 8 | 60449 | """Test suite for statistics module, including helper NumericTestCase and
approx_equal function.
"""
import collections
import decimal
import doctest
import math
import random
import sys
import types
import unittest
from decimal import Decimal
from fractions import Fraction
# Module to be tested.
import statistics
# === Helper functions and class ===
def _calc_errors(actual, expected):
"""Return the absolute and relative errors between two numbers.
>>> _calc_errors(100, 75)
(25, 0.25)
>>> _calc_errors(100, 100)
(0, 0.0)
Returns the (absolute error, relative error) between the two arguments.
"""
base = max(abs(actual), abs(expected))
abs_err = abs(actual - expected)
rel_err = abs_err/base if base else float('inf')
return (abs_err, rel_err)
def approx_equal(x, y, tol=1e-12, rel=1e-7):
    """approx_equal(x, y [, tol [, rel]]) => True|False

    Return True if numbers x and y are approximately equal, to within some
    margin of error, otherwise return False. Numbers which compare equal
    will also compare approximately equal.

    x is approximately equal to y if the difference between them is less than
    an absolute error tol or a relative error rel, whichever is bigger.

    If given, both tol and rel must be finite, non-negative numbers. If not
    given, default values are tol=1e-12 and rel=1e-7.

    >>> approx_equal(1.2589, 1.2587, tol=0.0003, rel=0)
    True
    >>> approx_equal(1.2589, 1.2587, tol=0.0001, rel=0)
    False

    Absolute error is defined as abs(x-y); if that is less than or equal to
    tol, x and y are considered approximately equal.

    Relative error is defined as abs((x-y)/x) or abs((x-y)/y), whichever is
    smaller, provided x or y are not zero. If that figure is less than or
    equal to rel, x and y are considered approximately equal.

    Complex numbers are not directly supported. If you wish to compare to
    complex numbers, extract their real and imaginary parts and compare them
    individually.

    NANs always compare unequal, even with themselves. Infinities compare
    approximately equal if they have the same sign (both positive or both
    negative). Infinities with different signs compare unequal; so do
    comparisons of infinities with finite numbers.
    """
    if tol < 0 or rel < 0:
        raise ValueError('error tolerances must be non-negative')
    # Guard clauses for the non-finite cases, in order:
    if math.isnan(x) or math.isnan(y):
        # A NAN never compares equal to anything, not even approximately.
        return False
    if x == y:
        # Exact equality — also handles two infinities of the same sign.
        return True
    if math.isinf(x) or math.isinf(y):
        # Opposite-sign infinities, or one infinity vs a finite number.
        return False
    # Both finite: pass if within the larger of the two tolerances.
    difference = abs(x - y)
    allowance = max(tol, rel * max(abs(x), abs(y)))
    return difference <= allowance
# This class exists only as somewhere to stick a docstring containing
# doctests. The following docstring and tests were originally in a separate
# module. Now that it has been merged in here, I need somewhere to hang the
# docstring. Ultimately, this class will die, and the information below will
# either become redundant, or be moved into more appropriate places.
class _DoNothing:
    """
    When doing numeric work, especially with floats, exact equality is often
    not what you want. Due to round-off error, it is often a bad idea to try
    to compare floats with equality. Instead the usual procedure is to test
    them with some (hopefully small!) allowance for error.

    The ``approx_equal`` function allows you to specify either an absolute
    error tolerance, or a relative error, or both.

    Absolute error tolerances are simple, but you need to know the magnitude
    of the quantities being compared:

    >>> approx_equal(12.345, 12.346, tol=1e-3)
    True
    >>> approx_equal(12.345e6, 12.346e6, tol=1e-3)  # tol is too small.
    False

    Relative errors are more suitable when the values you are comparing can
    vary in magnitude:

    >>> approx_equal(12.345, 12.346, rel=1e-4)
    True
    >>> approx_equal(12.345e6, 12.346e6, rel=1e-4)
    True

    but a naive implementation of relative error testing can run into trouble
    around zero.

    If you supply both an absolute tolerance and a relative error, the
    comparison succeeds if either individual test succeeds:

    >>> approx_equal(12.345e6, 12.346e6, tol=1e-3, rel=1e-4)
    True

    """
    # This class exists solely to host the docstring above (and its doctests,
    # which may be collected when the module's doctests are run); it defines
    # no behavior of its own.
    pass
# We prefer this for testing numeric values that may not be exactly equal,
# and avoid using TestCase.assertAlmostEqual, because it sucks :-)
class NumericTestCase(unittest.TestCase):
    """Unit test class for numeric work.
    This subclasses TestCase. In addition to the standard method
    ``TestCase.assertAlmostEqual``, ``assertApproxEqual`` is provided.
    """
    # By default, we expect exact equality, unless overridden by a subclass.
    tol = rel = 0
    def assertApproxEqual(
        self, first, second, tol=None, rel=None, msg=None
    ):
        """Test passes if ``first`` and ``second`` are approximately equal.
        This test passes if ``first`` and ``second`` are equal to
        within ``tol``, an absolute error, or ``rel``, a relative error.
        If either ``tol`` or ``rel`` are None or not given, they default to
        test attributes of the same name (by default, 0).
        The objects may be either numbers, or sequences of numbers. Sequences
        are tested element-by-element.
        >>> class MyTest(NumericTestCase):
        ...     def test_number(self):
        ...         x = 1.0/6
        ...         y = sum([x]*6)
        ...         self.assertApproxEqual(y, 1.0, tol=1e-15)
        ...     def test_sequence(self):
        ...         a = [1.001, 1.001e-10, 1.001e10]
        ...         b = [1.0, 1e-10, 1e10]
        ...         self.assertApproxEqual(a, b, rel=1e-3)
        ...
        >>> import unittest
        >>> from io import StringIO  # Suppress test runner output.
        >>> suite = unittest.TestLoader().loadTestsFromTestCase(MyTest)
        >>> unittest.TextTestRunner(stream=StringIO()).run(suite)
        <unittest.runner.TextTestResult run=2 errors=0 failures=0>
        """
        # Fall back on the class-level defaults (exact equality unless the
        # subclass overrides ``tol``/``rel``).
        if tol is None:
            tol = self.tol
        if rel is None:
            rel = self.rel
        # BUG FIX: use collections.abc.Sequence. The bare
        # ``collections.Sequence`` alias was deprecated in Python 3.3 and
        # removed in Python 3.10, so the old spelling raises AttributeError
        # on modern interpreters. The local import guarantees the submodule
        # is loaded even if the file only does ``import collections``.
        import collections.abc
        if (
            isinstance(first, collections.abc.Sequence) and
            isinstance(second, collections.abc.Sequence)
        ):
            check = self._check_approx_seq
        else:
            check = self._check_approx_num
        check(first, second, tol, rel, msg)
    def _check_approx_seq(self, first, second, tol, rel, msg):
        # Compare two sequences element-by-element, failing fast (with a
        # descriptive message) when the lengths do not match.
        if len(first) != len(second):
            standardMsg = (
                "sequences differ in length: %d items != %d items"
                % (len(first), len(second))
            )
            msg = self._formatMessage(msg, standardMsg)
            raise self.failureException(msg)
        for i, (a,e) in enumerate(zip(first, second)):
            self._check_approx_num(a, e, tol, rel, msg, i)
    def _check_approx_num(self, first, second, tol, rel, msg, idx=None):
        # Compare two numbers via the module-level approx_equal; raise the
        # standard failure exception with a detailed message on mismatch.
        if approx_equal(first, second, tol, rel):
            # Test passes. Return early, we are done.
            return None
        # Otherwise we failed.
        standardMsg = self._make_std_err_msg(first, second, tol, rel, idx)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)
    @staticmethod
    def _make_std_err_msg(first, second, tol, rel, idx):
        # Create the standard error message for approx_equal failures.
        # ``idx`` is the sequence index of the mismatch, or None for a
        # plain numeric comparison.
        assert first != second
        template = (
            ' %r != %r\n'
            ' values differ by more than tol=%r and rel=%r\n'
            ' -> absolute error = %r\n'
            ' -> relative error = %r'
        )
        if idx is not None:
            header = 'numeric sequences first differ at index %d.\n' % idx
            template = header + template
        # Calculate actual errors:
        abs_err, rel_err = _calc_errors(first, second)
        return template % (first, second, tol, rel, abs_err, rel_err)
# ========================
# === Test the helpers ===
# ========================
# --- Tests for approx_equal ---
class ApproxEqualSymmetryTest(unittest.TestCase):
    # Test symmetry of approx_equal: swapping the two arguments must never
    # change the outcome of the comparison.
    def test_relative_symmetry(self):
        # Check that approx_equal treats relative error symmetrically.
        # (a-b)/a is usually not equal to (a-b)/b. Ensure that this
        # doesn't matter.
        #
        # Note: the reason for this test is that an early version
        # of approx_equal was not symmetric. A relative error test
        # would pass, or fail, depending on which value was passed
        # as the first argument.
        #
        args1 = [2456, 37.8, -12.45, Decimal('2.54'), Fraction(17, 54)]
        args2 = [2459, 37.2, -12.41, Decimal('2.59'), Fraction(15, 54)]
        assert len(args1) == len(args2)
        for a, b in zip(args1, args2):
            self.do_relative_symmetry(a, b)
    def do_relative_symmetry(self, a, b):
        a, b = min(a, b), max(a, b)
        assert a < b
        delta = b - a  # The absolute difference between the values.
        rel_err1, rel_err2 = abs(delta/a), abs(delta/b)
        # Choose an error margin halfway between the two.
        rel = (rel_err1 + rel_err2)/2
        # Now see that values a and b compare approx equal regardless of
        # which is given first.
        self.assertTrue(approx_equal(a, b, tol=0, rel=rel))
        self.assertTrue(approx_equal(b, a, tol=0, rel=rel))
    def test_symmetry(self):
        # Test that approx_equal(a, b) == approx_equal(b, a)
        args = [-23, -2, 5, 107, 93568]
        delta = 2
        for a in args:
            for type_ in (int, float, Decimal, Fraction):
                x = type_(a)*100
                y = x + delta
                r = abs(delta/max(x, y))
                # There are five cases to check:
                # 1) actual error <= tol, <= rel
                self.do_symmetry_test(x, y, tol=delta, rel=r)
                self.do_symmetry_test(x, y, tol=delta+1, rel=2*r)
                # 2) actual error > tol, > rel
                self.do_symmetry_test(x, y, tol=delta-1, rel=r/2)
                # 3) actual error <= tol, > rel
                self.do_symmetry_test(x, y, tol=delta, rel=r/2)
                # 4) actual error > tol, <= rel
                self.do_symmetry_test(x, y, tol=delta-1, rel=r)
                self.do_symmetry_test(x, y, tol=delta-1, rel=2*r)
                # 5) exact equality test
                self.do_symmetry_test(x, x, tol=0, rel=0)
                self.do_symmetry_test(x, y, tol=0, rel=0)
    def do_symmetry_test(self, a, b, tol, rel):
        # BUG FIX: the template previously used a printf-style "%r"
        # placeholder together with str.format, so the failure message
        # never actually included the offending arguments. Use a
        # format-style "{!r}" placeholder instead.
        template = "approx_equal comparisons don't match for {!r}"
        flag1 = approx_equal(a, b, tol, rel)
        flag2 = approx_equal(b, a, tol, rel)
        self.assertEqual(flag1, flag2, template.format((a, b, tol, rel)))
class ApproxEqualExactTest(unittest.TestCase):
    # Exactly equal values must always compare as approximately equal,
    # no matter which error tolerances are supplied.
    def do_exactly_equal_test(self, x, tol, rel):
        for value in (x, -x):
            outcome = approx_equal(value, value, tol=tol, rel=rel)
            self.assertTrue(outcome, 'equality failure for x=%r' % value)
    def test_exactly_equal_ints(self):
        # Equal ints compare equal with zero tolerances.
        for n in [42, 19740, 14974, 230, 1795, 700245, 36587]:
            self.do_exactly_equal_test(n, 0, 0)
    def test_exactly_equal_floats(self):
        # Equal floats compare equal with zero tolerances.
        for x in [0.42, 1.9740, 1497.4, 23.0, 179.5, 70.0245, 36.587]:
            self.do_exactly_equal_test(x, 0, 0)
    def test_exactly_equal_fractions(self):
        # Equal Fractions compare equal with zero tolerances.
        F = Fraction
        for frac in [F(1, 2), F(0), F(5, 3), F(9, 7), F(35, 36), F(3, 7)]:
            self.do_exactly_equal_test(frac, 0, 0)
    def test_exactly_equal_decimals(self):
        # Equal Decimals compare equal with zero tolerances.
        for dec in map(Decimal, "8.2 31.274 912.04 16.745 1.2047".split()):
            self.do_exactly_equal_test(dec, 0, 0)
    def test_exactly_equal_absolute(self):
        # Equal values compare equal with a non-zero absolute tolerance.
        for n in [16, 1013, 1372, 1198, 971, 4]:
            self.do_exactly_equal_test(n, 0.01, 0)       # as ints
            self.do_exactly_equal_test(n/10, 0.01, 0)    # as floats
            frac = Fraction(n, 1234)                     # as Fractions
            self.do_exactly_equal_test(frac, 0.01, 0)
    def test_exactly_equal_absolute_decimals(self):
        # Equal Decimals compare equal with a non-zero absolute tolerance.
        self.do_exactly_equal_test(Decimal("3.571"), Decimal("0.01"), 0)
        self.do_exactly_equal_test(-Decimal("81.3971"), Decimal("0.01"), 0)
    def test_exactly_equal_relative(self):
        # Equal values compare equal with a non-zero relative tolerance.
        for value in [8347, 101.3, -7910.28, Fraction(5, 21)]:
            self.do_exactly_equal_test(value, 0, 0.01)
        self.do_exactly_equal_test(Decimal("11.68"), 0, Decimal("0.01"))
    def test_exactly_equal_both(self):
        # Equal values compare equal when both tolerances are supplied.
        for value in [41017, 16.742, -813.02, Fraction(3, 8)]:
            self.do_exactly_equal_test(value, 0.1, 0.01)
        D = Decimal
        self.do_exactly_equal_test(D("7.2"), D("0.1"), D("0.01"))
class ApproxEqualUnequalTest(unittest.TestCase):
    # With zero error tolerances, unequal values must compare unequal.
    def do_exactly_unequal_test(self, x):
        for value in (x, -x):
            outcome = approx_equal(value, value+1, tol=0, rel=0)
            self.assertFalse(outcome, 'inequality failure for x=%r' % value)
    def test_exactly_unequal_ints(self):
        # Unequal ints with zero tolerances.
        for n in [951, 572305, 478, 917, 17240]:
            self.do_exactly_unequal_test(n)
    def test_exactly_unequal_floats(self):
        # Unequal floats with zero tolerances.
        for x in [9.51, 5723.05, 47.8, 9.17, 17.24]:
            self.do_exactly_unequal_test(x)
    def test_exactly_unequal_fractions(self):
        # Unequal Fractions with zero tolerances.
        F = Fraction
        for frac in [F(1, 5), F(7, 9), F(12, 11), F(101, 99023)]:
            self.do_exactly_unequal_test(frac)
    def test_exactly_unequal_decimals(self):
        # Unequal Decimals with zero tolerances.
        for dec in map(Decimal, "3.1415 298.12 3.47 18.996 0.00245".split()):
            self.do_exactly_unequal_test(dec)
class ApproxEqualInexactTest(unittest.TestCase):
    # Exercise approx_equal with pairs of values that are NOT exactly equal.
    # --- absolute error cases ---
    def do_approx_equal_abs_test(self, x, delta):
        template = "Test failure for x={!r}, y={!r}"
        for candidate in (x + delta, x - delta):
            failure_msg = template.format(x, candidate)
            # Twice the gap must be tolerated; half the gap must not.
            self.assertTrue(
                approx_equal(x, candidate, tol=2*delta, rel=0), failure_msg)
            self.assertFalse(
                approx_equal(x, candidate, tol=delta/2, rel=0), failure_msg)
    def test_approx_equal_absolute_ints(self):
        # Ints compared with an absolute error tolerance.
        for n in [-10737, -1975, -7, -2, 0, 1, 9, 37, 423, 9874, 23789110]:
            self.do_approx_equal_abs_test(n, 10)
            self.do_approx_equal_abs_test(n, 2)
    def test_approx_equal_absolute_floats(self):
        # Floats compared with an absolute error tolerance.
        for x in [-284.126, -97.1, -3.4, -2.15, 0.5, 1.0, 7.8, 4.23, 3817.4]:
            for delta in (1.5, 0.01, 0.0001):
                self.do_approx_equal_abs_test(x, delta)
    def test_approx_equal_absolute_fractions(self):
        # Fractions compared with an absolute error tolerance.
        delta = Fraction(1, 29)
        for num in [-84, -15, -2, -1, 0, 1, 5, 17, 23, 34, 71]:
            frac = Fraction(num, 29)
            self.do_approx_equal_abs_test(frac, delta)
            self.do_approx_equal_abs_test(frac, float(delta))
    def test_approx_equal_absolute_decimals(self):
        # Decimals compared with an absolute error tolerance.
        delta = Decimal("0.01")
        for dec in map(Decimal, "1.0 3.5 36.08 61.79 7912.3648".split()):
            self.do_approx_equal_abs_test(dec, delta)
            self.do_approx_equal_abs_test(-dec, delta)
    def test_cross_zero(self):
        # Values of opposite sign can still compare approximately equal.
        self.assertTrue(approx_equal(1e-5, -1e-5, tol=1e-4, rel=0))
    # --- relative error cases ---
    def do_approx_equal_rel_test(self, x, delta):
        template = "Test failure for x={!r}, y={!r}"
        for candidate in (x*(1+delta), x*(1-delta)):
            failure_msg = template.format(x, candidate)
            # Twice the relative gap must pass; half of it must fail.
            self.assertTrue(
                approx_equal(x, candidate, tol=0, rel=2*delta), failure_msg)
            self.assertFalse(
                approx_equal(x, candidate, tol=0, rel=delta/2), failure_msg)
    def test_approx_equal_relative_ints(self):
        # Ints compared with a relative error tolerance.
        self.assertTrue(approx_equal(64, 47, tol=0, rel=0.36))
        self.assertTrue(approx_equal(64, 47, tol=0, rel=0.37))
        # ---
        self.assertTrue(approx_equal(449, 512, tol=0, rel=0.125))
        self.assertTrue(approx_equal(448, 512, tol=0, rel=0.125))
        self.assertFalse(approx_equal(447, 512, tol=0, rel=0.125))
    def test_approx_equal_relative_floats(self):
        # Floats compared with a relative error tolerance.
        for x in [-178.34, -0.1, 0.1, 1.0, 36.97, 2847.136, 9145.074]:
            for delta in (0.02, 0.0001):
                self.do_approx_equal_rel_test(x, delta)
    def test_approx_equal_relative_fractions(self):
        # Fractions compared with a relative error tolerance.
        F = Fraction
        delta = Fraction(3, 8)
        for frac in [F(3, 84), F(17, 30), F(49, 50), F(92, 85)]:
            for d in (delta, float(delta)):
                self.do_approx_equal_rel_test(frac, d)
                self.do_approx_equal_rel_test(-frac, d)
    def test_approx_equal_relative_decimals(self):
        # Decimals compared with a relative error tolerance.
        for dec in map(Decimal, "0.02 1.0 5.7 13.67 94.138 91027.9321".split()):
            self.do_approx_equal_rel_test(dec, Decimal("0.001"))
            self.do_approx_equal_rel_test(-dec, Decimal("0.05"))
    # --- combined absolute and relative error cases ---
    # Four possibilities, depending on whether the actual error falls
    # within the absolute tolerance, the relative tolerance, both, or
    # neither.
    def do_check_both(self, a, b, tol, rel, tol_flag, rel_flag):
        def expect(flag):
            # Map a pass/fail expectation onto the matching assertion.
            return self.assertTrue if flag else self.assertFalse
        expect(tol_flag)(approx_equal(a, b, tol=tol, rel=0))
        expect(rel_flag)(approx_equal(a, b, tol=0, rel=rel))
        expect(tol_flag or rel_flag)(approx_equal(a, b, tol=tol, rel=rel))
    def test_approx_equal_both1(self):
        # Within both the absolute and the relative tolerance.
        self.do_check_both(7.955, 7.952, 0.004, 3.8e-4, True, True)
        self.do_check_both(-7.387, -7.386, 0.002, 0.0002, True, True)
    def test_approx_equal_both2(self):
        # Within the absolute tolerance but outside the relative one.
        self.do_check_both(7.955, 7.952, 0.004, 3.7e-4, True, False)
    def test_approx_equal_both3(self):
        # Within the relative tolerance but outside the absolute one.
        self.do_check_both(7.955, 7.952, 0.001, 3.8e-4, False, True)
    def test_approx_equal_both4(self):
        # Outside both tolerances.
        self.do_check_both(2.78, 2.75, 0.01, 0.001, False, False)
        self.do_check_both(971.44, 971.47, 0.02, 3e-5, False, False)
class ApproxEqualSpecialsTest(unittest.TestCase):
    # Behaviour of approx_equal with INFs, NANs and signed zeroes.
    def test_inf(self):
        # Infinities compare equal only to infinities of the same sign.
        for kind in (float, Decimal):
            inf = kind('inf')
            self.assertTrue(approx_equal(inf, inf))
            self.assertTrue(approx_equal(inf, inf, 0, 0))
            self.assertTrue(approx_equal(inf, inf, 1, 0.01))
            self.assertTrue(approx_equal(-inf, -inf))
            self.assertFalse(approx_equal(inf, -inf))
            self.assertFalse(approx_equal(inf, 1000))
    def test_nan(self):
        # A NAN never compares approximately equal to anything, even itself.
        for kind in (float, Decimal):
            nan = kind('nan')
            for other in (nan, kind('inf'), 1000):
                self.assertFalse(approx_equal(nan, other))
    def test_float_zeroes(self):
        # Positive and negative float zero compare approximately equal.
        neg_zero = math.copysign(0.0, -1)
        self.assertTrue(approx_equal(neg_zero, 0.0, tol=0.1, rel=0.1))
    def test_decimal_zeroes(self):
        # Positive and negative Decimal zero compare approximately equal.
        neg_zero = Decimal("-0.0")
        self.assertTrue(approx_equal(neg_zero, Decimal(0), tol=0.1, rel=0.1))
class TestApproxEqualErrors(unittest.TestCase):
    # Error conditions of approx_equal.
    def test_bad_tol(self):
        # A negative absolute tolerance must raise ValueError.
        with self.assertRaises(ValueError):
            approx_equal(100, 100, -1, 0.1)
    def test_bad_rel(self):
        # A negative relative tolerance must raise ValueError.
        with self.assertRaises(ValueError):
            approx_equal(100, 100, 1, -0.1)
# --- Tests for NumericTestCase ---
# The formatting routine that generates the error messages is complex enough
# that it too needs testing.
class TestNumericTestCase(unittest.TestCase):
    # The precise wording of NumericTestCase error messages is *not*
    # guaranteed, but we still want some assurance that they are built
    # correctly. As a compromise, we check for selected substrings that
    # should survive reasonable rewordings of the overall message.
    def do_test(self, args):
        message = NumericTestCase._make_std_err_msg(*args)
        for fragment in self.generate_substrings(*args):
            self.assertIn(fragment, message)
    def test_numerictestcase_is_testcase(self):
        # NumericTestCase must itself be a TestCase subclass.
        self.assertTrue(issubclass(NumericTestCase, unittest.TestCase))
    def test_error_msg_numeric(self):
        # Message generated for a plain numeric comparison failure.
        self.do_test((2.5, 4.0, 0.5, 0.25, None))
    def test_error_msg_sequence(self):
        # Message generated for a sequence comparison failure.
        self.do_test((3.75, 8.25, 1.25, 0.5, 7))
    def generate_substrings(self, first, second, tol, rel, idx):
        """Return substrings we expect to see in error messages."""
        abs_err, rel_err = _calc_errors(first, second)
        expected = [
            'tol=%r' % tol,
            'rel=%r' % rel,
            'absolute error = %r' % abs_err,
            'relative error = %r' % rel_err,
        ]
        if idx is not None:
            expected.append('differ at index %d' % idx)
        return expected
# =======================================
# === Tests for the statistics module ===
# =======================================
class GlobalsTest(unittest.TestCase):
    module = statistics
    expected_metadata = ["__doc__", "__all__"]
    def test_meta(self):
        # The module must expose the standard metadata attributes.
        for attr in self.expected_metadata:
            self.assertTrue(hasattr(self.module, attr),
                            "%s not present" % attr)
    def test_check_all(self):
        # Every name listed in __all__ must be public and must exist.
        mod = self.module
        for name in mod.__all__:
            self.assertFalse(name.startswith("_"),
                             'private name "%s" in __all__' % name)
            self.assertTrue(hasattr(mod, name),
                            'missing name "%s" in __all__' % name)
class DocTests(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -OO and above")
def test_doc_tests(self):
failed, tried = doctest.testmod(statistics)
self.assertGreater(tried, 0)
self.assertEqual(failed, 0)
class StatisticsErrorTest(unittest.TestCase):
    def test_has_exception(self):
        # statistics must define StatisticsError as a ValueError subclass.
        errmsg = (
            "Expected StatisticsError to be a ValueError, but got a"
            " subclass of %r instead."
        )
        self.assertTrue(hasattr(statistics, 'StatisticsError'))
        exc = statistics.StatisticsError
        self.assertTrue(issubclass(exc, ValueError), errmsg % exc.__base__)
# === Tests for private utility functions ===
class ExactRatioTest(unittest.TestCase):
# Test _exact_ratio utility.
def test_int(self):
for i in (-20, -3, 0, 5, 99, 10**20):
self.assertEqual(statistics._exact_ratio(i), (i, 1))
def test_fraction(self):
numerators = (-5, 1, 12, 38)
for n in numerators:
f = Fraction(n, 37)
self.assertEqual(statistics._exact_ratio(f), (n, 37))
def test_float(self):
self.assertEqual(statistics._exact_ratio(0.125), (1, 8))
self.assertEqual(statistics._exact_ratio(1.125), (9, 8))
data = [random.uniform(-100, 100) for _ in range(100)]
for x in data:
num, den = statistics._exact_ratio(x)
self.assertEqual(x, num/den)
def test_decimal(self):
D = Decimal
_exact_ratio = statistics._exact_ratio
self.assertEqual(_exact_ratio(D("0.125")), (125, 1000))
self.assertEqual(_exact_ratio(D("12.345")), (12345, 1000))
self.assertEqual(_exact_ratio(D("-1.98")), (-198, 100))
class DecimalToRatioTest(unittest.TestCase):
    # Tests for the _decimal_to_ratio private function.
    def testSpecialsRaise(self):
        # NANs and INFs must raise ValueError.
        # Finite values are already covered by _exact_ratio above.
        for special in (Decimal('NAN'), Decimal('sNAN'), Decimal('INF')):
            self.assertRaises(ValueError, statistics._decimal_to_ratio, special)
    def test_sign(self):
        # The sign must be carried on the numerator only.
        for d in [Decimal("9.8765e12"), Decimal("9.8765e-12")]:
            # Positive input: non-negative numerator, positive denominator.
            assert d > 0
            num, den = statistics._decimal_to_ratio(d)
            self.assertGreaterEqual(num, 0)
            self.assertGreater(den, 0)
            # Negative input: non-positive numerator, positive denominator.
            num, den = statistics._decimal_to_ratio(-d)
            self.assertLessEqual(num, 0)
            self.assertGreater(den, 0)
    def test_negative_exponent(self):
        # A negative exponent produces a power-of-ten denominator.
        result = statistics._decimal_to_ratio(Decimal("0.1234"))
        self.assertEqual(result, (1234, 10000))
    def test_positive_exponent(self):
        # A positive exponent is folded into the numerator.
        result = statistics._decimal_to_ratio(Decimal("1.234e7"))
        self.assertEqual(result, (12340000, 1))
    def test_regression_20536(self):
        # Regression test for issue 20536.
        # See http://bugs.python.org/issue20536
        self.assertEqual(statistics._decimal_to_ratio(Decimal("1e2")),
                         (100, 1))
        self.assertEqual(statistics._decimal_to_ratio(Decimal("1.47e5")),
                         (147000, 1))
class CheckTypeTest(unittest.TestCase):
    # Tests for the statistics._check_type private function.
    def test_allowed(self):
        # Types already in the allowed set pass silently.
        permitted = {int, float}
        statistics._check_type(int, permitted)
        statistics._check_type(float, permitted)
    def test_not_allowed(self):
        # A type outside the allowed set raises TypeError.
        permitted = {int, float}
        self.assertRaises(TypeError, statistics._check_type, Decimal, permitted)
    def test_add_to_allowed(self):
        # The first unseen type is added to the allowed set.
        permitted = {int}
        statistics._check_type(float, permitted)
        self.assertEqual(permitted, {int, float})
# === Tests for public functions ===
class UnivariateCommonMixin:
    # Common tests for most univariate functions that take a data argument.
    # NOTE: concrete subclasses must also inherit a TestCase class and set
    # ``self.func`` (usually in setUp) to the function under test.
    def test_no_args(self):
        # Fail if given no arguments.
        self.assertRaises(TypeError, self.func)
    def test_empty_data(self):
        # Fail when the data argument (first argument) is empty.
        for empty in ([], (), iter([])):
            self.assertRaises(statistics.StatisticsError, self.func, empty)
    def prepare_data(self):
        """Return int data for various tests."""
        # Shuffle until the data is not sorted, so order-sensitivity
        # bugs cannot hide behind already-ordered input.
        data = list(range(10))
        while data == sorted(data):
            random.shuffle(data)
        return data
    def test_no_inplace_modifications(self):
        # Test that the function does not modify its input data.
        data = self.prepare_data()
        assert len(data) != 1 # Necessary to avoid infinite loop.
        assert data != sorted(data)
        saved = data[:]
        assert data is not saved
        _ = self.func(data)
        self.assertListEqual(data, saved, "data has been modified")
    def test_order_doesnt_matter(self):
        # Test that the order of data points doesn't change the result.
        # CAUTION: due to floating point rounding errors, the result actually
        # may depend on the order. Consider this test representing an ideal.
        # To avoid this test failing, only test with exact values such as ints
        # or Fractions.
        data = [1, 2, 3, 3, 3, 4, 5, 6]*100
        expected = self.func(data)
        random.shuffle(data)
        actual = self.func(data)
        self.assertEqual(expected, actual)
    def test_type_of_data_collection(self):
        # Test that the type of iterable data doesn't effect the result.
        # Cover built-in containers, one-shot iterators/generators, and
        # user-defined subclasses of list and tuple.
        class MyList(list):
            pass
        class MyTuple(tuple):
            pass
        def generator(data):
            return (obj for obj in data)
        data = self.prepare_data()
        expected = self.func(data)
        for kind in (list, tuple, iter, MyList, MyTuple, generator):
            result = self.func(kind(data))
            self.assertEqual(result, expected)
    def test_range_data(self):
        # Test that functions work with range objects.
        data = range(20, 50, 3)
        expected = self.func(list(data))
        self.assertEqual(self.func(data), expected)
    def test_bad_arg_types(self):
        # Test that function raises when given data of the wrong type.
        # Don't roll the following into a loop like this:
        # for bad in list_of_bad:
        # self.check_for_type_error(bad)
        #
        # Since assertRaises doesn't show the arguments that caused the test
        # failure, it is very difficult to debug these test failures when the
        # following are in a loop.
        self.check_for_type_error(None)
        self.check_for_type_error(23)
        self.check_for_type_error(42.0)
        self.check_for_type_error(object())
    def check_for_type_error(self, *args):
        # Helper: assert that calling self.func with *args raises TypeError.
        self.assertRaises(TypeError, self.func, *args)
    def test_type_of_data_element(self):
        # Check the type of data elements doesn't affect the numeric result.
        # This is a weaker test than UnivariateTypeMixin.testTypesConserved,
        # because it checks the numeric result by equality, but not by type.
        # MyFloat overrides just the operators needed so that arithmetic on
        # it stays within MyFloat instead of decaying to plain float.
        class MyFloat(float):
            def __truediv__(self, other):
                return type(self)(super().__truediv__(other))
            def __add__(self, other):
                return type(self)(super().__add__(other))
            __radd__ = __add__
        raw = self.prepare_data()
        expected = self.func(raw)
        for kind in (float, MyFloat, Decimal, Fraction):
            data = [kind(x) for x in raw]
            result = type(expected)(self.func(data))
            self.assertEqual(result, expected)
class UnivariateTypeMixin:
    """Mixin class for type-conserving functions.
    This mixin class holds test(s) for functions which conserve the type of
    individual data points. E.g. the mean of a list of Fractions should itself
    be a Fraction.
    Not all tests to do with types need go in this class. Only those that
    rely on the function returning the same type as its input data.
    """
    def test_types_conserved(self):
        # Test that functions keeps the same type as their data points.
        # (Excludes mixed data types.) This only tests the type of the return
        # result, not the value.
        # MyFloat overrides the arithmetic operators that the functions
        # under test may use, so results stay MyFloat rather than decaying
        # to plain float.
        class MyFloat(float):
            def __truediv__(self, other):
                return type(self)(super().__truediv__(other))
            def __sub__(self, other):
                return type(self)(super().__sub__(other))
            def __rsub__(self, other):
                return type(self)(super().__rsub__(other))
            def __pow__(self, other):
                return type(self)(super().__pow__(other))
            def __add__(self, other):
                return type(self)(super().__add__(other))
            __radd__ = __add__
        data = self.prepare_data()
        for kind in (float, Decimal, Fraction, MyFloat):
            d = [kind(x) for x in data]
            result = self.func(d)
            self.assertIs(type(result), kind)
class TestSum(NumericTestCase, UnivariateCommonMixin, UnivariateTypeMixin):
    # Test cases for statistics._sum() function.
    def setUp(self):
        # The inherited mixin tests are driven through self.func.
        self.func = statistics._sum
    def test_empty_data(self):
        # Override test for empty data: _sum of no data returns the start
        # value (default 0) rather than raising.
        for data in ([], (), iter([])):
            self.assertEqual(self.func(data), 0)
            self.assertEqual(self.func(data, 23), 23)
            self.assertEqual(self.func(data, 2.3), 2.3)
    def test_ints(self):
        # Sum of int data, with and without a start value.
        self.assertEqual(self.func([1, 5, 3, -4, -8, 20, 42, 1]), 60)
        self.assertEqual(self.func([4, 2, 3, -8, 7], 1000), 1008)
    def test_floats(self):
        # Sum of float data, with and without a start value.
        self.assertEqual(self.func([0.25]*20), 5.0)
        self.assertEqual(self.func([0.125, 0.25, 0.5, 0.75], 1.5), 3.125)
    def test_fractions(self):
        # Sum of Fraction data.
        # CONSISTENCY FIX: the alias F was previously assigned but never
        # used; use it, matching the D alias in test_decimals below.
        F = Fraction
        self.assertEqual(self.func([F(1, 1000)]*500), F(1, 2))
    def test_decimals(self):
        # Sum of Decimal data.
        D = Decimal
        data = [D("0.001"), D("5.246"), D("1.702"), D("-0.025"),
                D("3.974"), D("2.328"), D("4.617"), D("2.843"),
                ]
        self.assertEqual(self.func(data), Decimal("20.686"))
    def test_compare_with_math_fsum(self):
        # Compare with the math.fsum function.
        # Ideally we ought to get the exact same result, but sometimes
        # we differ by a very slight amount :-(
        data = [random.uniform(-100, 1000) for _ in range(1000)]
        self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16)
    def test_start_argument(self):
        # Test that the optional start argument works correctly.
        data = [random.uniform(1, 1000) for _ in range(100)]
        t = self.func(data)
        self.assertEqual(t+42, self.func(data, 42))
        self.assertEqual(t-23, self.func(data, -23))
        self.assertEqual(t+1e20, self.func(data, 1e20))
    def test_strings_fail(self):
        # Sum of strings should fail.
        self.assertRaises(TypeError, self.func, [1, 2, 3], '999')
        self.assertRaises(TypeError, self.func, [1, 2, 3, '999'])
    def test_bytes_fail(self):
        # Sum of bytes should fail.
        self.assertRaises(TypeError, self.func, [1, 2, 3], b'999')
        self.assertRaises(TypeError, self.func, [1, 2, 3, b'999'])
    def test_mixed_sum(self):
        # Mixed input types are not (currently) allowed.
        # Check that mixed data types fail.
        self.assertRaises(TypeError, self.func, [1, 2.0, Fraction(1, 2)])
        # And so does mixed start argument.
        self.assertRaises(TypeError, self.func, [1, 2.0], Decimal(1))
class SumTortureTest(NumericTestCase):
    def test_torture(self):
        # Variants of Tim Peters' torture test for accurate summation.
        tsum = statistics._sum
        self.assertEqual(tsum([1, 1e100, 1, -1e100]*10000), 20000.0)
        self.assertEqual(tsum([1e100, 1, 1, -1e100]*10000), 20000.0)
        self.assertApproxEqual(tsum([1e-100, 1, 1e-100, -1]*10000),
                               2.0e-96, rel=5e-16)
class SumSpecialValues(NumericTestCase):
    # _sum must handle IEEE-754 special values correctly.
    def test_nan(self):
        # A NAN in the data gives a NAN result of the same type.
        for kind in (float, Decimal):
            nan = kind('nan')
            total = statistics._sum([1, nan, 2])
            self.assertIs(type(total), kind)
            self.assertTrue(math.isnan(total))
    def check_infinity(self, x, inf):
        """Check x is an infinity of the same type and sign as inf."""
        self.assertTrue(math.isinf(x))
        self.assertIs(type(x), type(inf))
        self.assertEqual(x > 0, inf > 0)
        assert x == inf
    def do_test_inf(self, inf):
        # One infinity in the data gives an infinite sum.
        self.check_infinity(statistics._sum([1, 2, inf, 3]), inf)
        # So do two infinities of matching sign.
        self.check_infinity(statistics._sum([1, 2, inf, 3, inf, 4]), inf)
    def test_float_inf(self):
        # Sum with float infinities of either sign.
        inf = float('inf')
        for sign in (+1, -1):
            self.do_test_inf(sign*inf)
    def test_decimal_inf(self):
        # Sum with Decimal infinities of either sign.
        inf = Decimal('inf')
        for sign in (+1, -1):
            self.do_test_inf(sign*inf)
    def test_float_mismatched_infs(self):
        # Float infinities of opposite sign sum to a NAN.
        inf = float('inf')
        total = statistics._sum([1, 2, inf, 3, -inf, 4])
        self.assertTrue(math.isnan(total))
    def test_decimal_extendedcontext_mismatched_infs_to_nan(self):
        # In the extended context, opposite Decimal INFs sum to a NAN.
        inf = Decimal('inf')
        data = [1, 2, inf, 3, -inf, 4]
        with decimal.localcontext(decimal.ExtendedContext):
            self.assertTrue(math.isnan(statistics._sum(data)))
    def test_decimal_basiccontext_mismatched_infs_to_nan(self):
        # In the basic context, opposite Decimal INFs raise InvalidOperation.
        inf = Decimal('inf')
        data = [1, 2, inf, 3, -inf, 4]
        with decimal.localcontext(decimal.BasicContext):
            self.assertRaises(decimal.InvalidOperation, statistics._sum, data)
    def test_decimal_snan_raises(self):
        # A signalling NAN in the data raises InvalidOperation.
        sNAN = Decimal('sNAN')
        data = [1, sNAN, 2]
        self.assertRaises(decimal.InvalidOperation, statistics._sum, data)
# === Tests for averages ===
class AverageMixin(UnivariateCommonMixin):
    # Tests shared by all of the averages.
    def test_single_value(self):
        # The average of one data point is that data point.
        for value in (23, 42.5, 1.3e15, Fraction(15, 19), Decimal('0.28')):
            self.assertEqual(self.func([value]), value)
    def test_repeated_single_value(self):
        # The average of one value repeated many times is still that value.
        for value in (3.5, 17, 2.5e15, Fraction(61, 67), Decimal('4.9712')):
            for count in (2, 5, 10, 20):
                self.assertEqual(self.func([value]*count), value)
class TestMean(NumericTestCase, AverageMixin, UnivariateTypeMixin):
    def setUp(self):
        # All inherited tests are driven through statistics.mean.
        self.func = statistics.mean
    def test_torture_pep(self):
        # The "Torture Test" from PEP 450.
        self.assertEqual(self.func([1e100, 1, 3, -1e100]), 1)
    def test_ints(self):
        # Mean of int data.
        values = [0, 1, 2, 3, 3, 3, 4, 5, 5, 6, 7, 7, 7, 7, 8, 9]
        random.shuffle(values)
        self.assertEqual(self.func(values), 4.8125)
    def test_floats(self):
        # Mean of float data.
        values = [17.25, 19.75, 20.0, 21.5, 21.75, 23.25, 25.125, 27.5]
        random.shuffle(values)
        self.assertEqual(self.func(values), 22.015625)
    def test_decimals(self):
        # Mean of Decimal data.
        D = Decimal
        values = [D("1.634"), D("2.517"), D("3.912"), D("4.072"), D("5.813")]
        random.shuffle(values)
        self.assertEqual(self.func(values), D("3.5896"))
    def test_fractions(self):
        # Mean of Fraction data.
        F = Fraction
        values = [F(1, 2), F(2, 3), F(3, 4), F(4, 5), F(5, 6), F(6, 7), F(7, 8)]
        random.shuffle(values)
        self.assertEqual(self.func(values), F(1479, 1960))
    def test_inf(self):
        # A single infinity in the data makes the mean infinite.
        finite = [1, 3, 5, 7, 9]  # Use only ints, to avoid TypeError later.
        for kind in (float, Decimal):
            for sign in (1, -1):
                inf = kind("inf")*sign
                result = self.func(finite + [inf])
                self.assertTrue(math.isinf(result))
                self.assertEqual(result, inf)
    def test_mismatched_infs(self):
        # Infinities of opposite sign make the mean a NAN.
        values = [2, 4, 6, float('inf'), 1, 3, 5, float('-inf')]
        result = self.func(values)
        self.assertTrue(math.isnan(result))
    def test_nan(self):
        # A NAN anywhere in the data makes the mean a NAN.
        finite = [1, 3, 5, 7, 9]  # Use only ints, to avoid TypeError later.
        for kind in (float, Decimal):
            result = self.func(finite + [kind("nan")])
            self.assertTrue(math.isnan(result))
    def test_big_data(self):
        # Adding a large constant to every data point shifts the mean by it.
        shift = 1e9
        values = [3.4, 4.5, 4.9, 6.7, 6.8, 7.2, 8.0, 8.1, 9.4]
        expected = self.func(values) + shift
        assert expected != shift
        result = self.func([x + shift for x in values])
        self.assertEqual(result, expected)
    def test_doubled_data(self):
        # Mean of [a,b,c...z] should be same as for [a,a,b,b,c,c...z,z].
        values = [random.uniform(-3, 5) for _ in range(1000)]
        expected = self.func(values)
        self.assertApproxEqual(self.func(values*2), expected)
    def test_regression_20561(self):
        # Regression test for http://bugs.python.org/issue20561
        d = Decimal('1e4')
        self.assertEqual(statistics.mean([d]), d)
class TestMedian(NumericTestCase, AverageMixin):
    # Common tests for median and all median.* functions.
    def setUp(self):
        self.func = statistics.median
    def prepare_data(self):
        """Overload method from UnivariateCommonMixin."""
        values = super().prepare_data()
        if len(values)%2 != 1:
            # Force an odd number of data points.
            values.append(2)
        return values
    def test_even_ints(self):
        # With an even number of ints the two middle values are averaged.
        values = [1, 2, 3, 4, 5, 6]
        assert len(values)%2 == 0
        self.assertEqual(self.func(values), 3.5)
    def test_odd_ints(self):
        # With an odd number of ints the middle value is returned.
        values = [1, 2, 3, 4, 5, 6, 9]
        assert len(values)%2 == 1
        self.assertEqual(self.func(values), 4)
    def test_odd_fractions(self):
        # Median of an odd number of Fractions.
        F = Fraction
        values = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7)]
        assert len(values)%2 == 1
        random.shuffle(values)
        self.assertEqual(self.func(values), F(3, 7))
    def test_even_fractions(self):
        # Median of an even number of Fractions.
        F = Fraction
        values = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]
        assert len(values)%2 == 0
        random.shuffle(values)
        self.assertEqual(self.func(values), F(1, 2))
    def test_odd_decimals(self):
        # Median of an odd number of Decimals.
        D = Decimal
        values = [D('2.5'), D('3.1'), D('4.2'), D('5.7'), D('5.8')]
        assert len(values)%2 == 1
        random.shuffle(values)
        self.assertEqual(self.func(values), D('4.2'))
    def test_even_decimals(self):
        # Median of an even number of Decimals.
        D = Decimal
        values = [D('1.2'), D('2.5'), D('3.1'), D('4.2'), D('5.7'), D('5.8')]
        assert len(values)%2 == 0
        random.shuffle(values)
        self.assertEqual(self.func(values), D('3.65'))
class TestMedianDataType(NumericTestCase, UnivariateTypeMixin):
    # Test conservation of data element type for median.
    def setUp(self):
        self.func = statistics.median
    def prepare_data(self):
        # Use an odd number of points so the median is an actual element of
        # the data (and therefore keeps its type), and make sure the data
        # isn't already sorted.
        data = list(range(15))
        assert len(data)%2 == 1
        while data == sorted(data):
            random.shuffle(data)
        return data
class TestMedianLow(TestMedian, UnivariateTypeMixin):
    # median_low returns an actual element of the data even for even-length
    # input, so the even-count tests inherited from TestMedian are
    # overridden here to expect the smaller of the two middle values.
    def setUp(self):
        self.func = statistics.median_low
    def test_even_ints(self):
        # Test median_low with an even number of ints.
        data = [1, 2, 3, 4, 5, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 3)
    def test_even_fractions(self):
        # Test median_low works with an even number of Fractions.
        F = Fraction
        data = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), F(3, 7))
    def test_even_decimals(self):
        # Test median_low works with an even number of Decimals.
        D = Decimal
        data = [D('1.1'), D('2.2'), D('3.3'), D('4.4'), D('5.5'), D('6.6')]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), D('3.3'))
class TestMedianHigh(TestMedian, UnivariateTypeMixin):
    # median_high returns an actual element of the data even for even-length
    # input, so the even-count tests inherited from TestMedian are
    # overridden here to expect the larger of the two middle values.
    def setUp(self):
        self.func = statistics.median_high
    def test_even_ints(self):
        # Test median_high with an even number of ints.
        data = [1, 2, 3, 4, 5, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4)
    def test_even_fractions(self):
        # Test median_high works with an even number of Fractions.
        F = Fraction
        data = [F(1, 7), F(2, 7), F(3, 7), F(4, 7), F(5, 7), F(6, 7)]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), F(4, 7))
    def test_even_decimals(self):
        # Test median_high works with an even number of Decimals.
        D = Decimal
        data = [D('1.1'), D('2.2'), D('3.3'), D('4.4'), D('5.5'), D('6.6')]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), D('4.4'))
class TestMedianGrouped(TestMedian):
    # Test median_grouped.
    # Doesn't conserve data element types, so don't use TestMedianType.
    # median_grouped interpolates within the group containing the middle
    # value; the optional second argument passed below is the class
    # interval (group width).
    def setUp(self):
        self.func = statistics.median_grouped
    def test_odd_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [12, 13, 14, 14, 14, 15, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 14)
        #---
        data = [12, 13, 14, 14, 14, 14, 15]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data), 13.875)
        #---
        data = [5, 10, 10, 15, 20, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 1
        self.assertEqual(self.func(data, 5), 19.375)
        #---
        data = [16, 18, 18, 18, 18, 20, 20, 20, 22, 22, 22, 24, 24, 26, 28]
        assert len(data)%2 == 1
        self.assertApproxEqual(self.func(data, 2), 20.66666667, tol=1e-8)
    def test_even_number_repeated(self):
        # Test median.grouped with repeated median values.
        data = [5, 10, 10, 15, 20, 20, 20, 25, 25, 30]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data, 5), 19.16666667, tol=1e-8)
        #---
        data = [2, 3, 4, 4, 4, 5]
        assert len(data)%2 == 0
        self.assertApproxEqual(self.func(data), 3.83333333, tol=1e-8)
        #---
        data = [2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.5)
        #---
        data = [3, 4, 4, 4, 5, 5, 5, 5, 6, 6]
        assert len(data)%2 == 0
        self.assertEqual(self.func(data), 4.75)
    def test_repeated_single_value(self):
        # Override method from AverageMixin.
        # Yet again, failure of median_grouped to conserve the data type
        # causes me headaches :-(
        for x in (5.3, 68, 4.3e17, Fraction(29, 101), Decimal('32.9714')):
            for count in (2, 5, 10, 20):
                data = [x]*count
                self.assertEqual(self.func(data), float(x))
    def test_odd_fractions(self):
        # Test median_grouped works with an odd number of Fractions.
        F = Fraction
        data = [F(5, 4), F(9, 4), F(13, 4), F(13, 4), F(17, 4)]
        assert len(data)%2 == 1
        random.shuffle(data)
        self.assertEqual(self.func(data), 3.0)
    def test_even_fractions(self):
        # Test median_grouped works with an even number of Fractions.
        F = Fraction
        data = [F(5, 4), F(9, 4), F(13, 4), F(13, 4), F(17, 4), F(17, 4)]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), 3.25)
    def test_odd_decimals(self):
        # Test median_grouped works with an odd number of Decimals.
        D = Decimal
        data = [D('5.5'), D('6.5'), D('6.5'), D('7.5'), D('8.5')]
        assert len(data)%2 == 1
        random.shuffle(data)
        self.assertEqual(self.func(data), 6.75)
    def test_even_decimals(self):
        # Test median_grouped works with an even number of Decimals.
        D = Decimal
        data = [D('5.5'), D('5.5'), D('6.5'), D('6.5'), D('7.5'), D('8.5')]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), 6.5)
        #---
        data = [D('5.5'), D('5.5'), D('6.5'), D('7.5'), D('7.5'), D('8.5')]
        assert len(data)%2 == 0
        random.shuffle(data)
        self.assertEqual(self.func(data), 7.0)
    def test_interval(self):
        # Test median_grouped with interval argument.
        data = [2.25, 2.5, 2.5, 2.75, 2.75, 3.0, 3.0, 3.25, 3.5, 3.75]
        self.assertEqual(self.func(data, 0.25), 2.875)
        data = [2.25, 2.5, 2.5, 2.75, 2.75, 2.75, 3.0, 3.0, 3.25, 3.5, 3.75]
        self.assertApproxEqual(self.func(data, 0.25), 2.83333333, tol=1e-8)
        data = [220, 220, 240, 260, 260, 260, 260, 280, 280, 300, 320, 340]
        self.assertEqual(self.func(data, 20), 265.0)
class TestMode(NumericTestCase, AverageMixin, UnivariateTypeMixin):
    # Test cases for the discrete version of mode.
    def setUp(self):
        self.func = statistics.mode
    def prepare_data(self):
        """Overload method from UnivariateCommonMixin."""
        # Make sure test data has exactly one mode.
        return [1, 1, 1, 1, 3, 4, 7, 9, 0, 8, 2]
    def test_range_data(self):
        # Override test from UnivariateCommonMixin.
        # A range has all-distinct values, so there is no unique mode and
        # an exception is expected.
        data = range(20, 50, 3)
        self.assertRaises(statistics.StatisticsError, self.func, data)
    def test_nominal_data(self):
        # Test mode with nominal (non-numeric) data.
        data = 'abcbdb'
        self.assertEqual(self.func(data), 'b')
        data = 'fe fi fo fum fi fi'.split()
        self.assertEqual(self.func(data), 'fi')
    def test_discrete_data(self):
        # Test mode with discrete numeric data.
        data = list(range(10))
        for i in range(10):
            d = data + [i]
            random.shuffle(d)
            self.assertEqual(self.func(d), i)
    def test_bimodal_data(self):
        # Test mode with bimodal data.
        data = [1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9, 9]
        assert data.count(2) == data.count(6) == 4
        # Check for an exception.
        self.assertRaises(statistics.StatisticsError, self.func, data)
    def test_unique_data_failure(self):
        # Test mode exception when data points are all unique.
        data = list(range(10))
        self.assertRaises(statistics.StatisticsError, self.func, data)
    def test_none_data(self):
        # Test that mode raises TypeError if given None as data.
        # This test is necessary because the implementation of mode uses
        # collections.Counter, which accepts None and returns an empty dict.
        self.assertRaises(TypeError, self.func, None)
    def test_counter_data(self):
        # Test that a Counter is treated like any other iterable.
        data = collections.Counter([1, 1, 1, 2])
        # Since the keys of the counter are treated as data points, not the
        # counts, this should raise.
        self.assertRaises(statistics.StatisticsError, self.func, data)
# === Tests for variances and standard deviations ===
class VarianceStdevMixin(UnivariateCommonMixin):
    # Mixin class holding common tests for variance and std dev.
    # Subclasses should inherit from this before NumericTestClass, in order
    # to see the rel attribute below. See testShiftData for an explanation.
    rel = 1e-12  # Relative tolerance used by assertApproxEqual.
    def test_single_value(self):
        # Deviation of a single value is zero.
        for x in (11, 19.8, 4.6e14, Fraction(21, 34), Decimal('8.392')):
            self.assertEqual(self.func([x]), 0)
    def test_repeated_single_value(self):
        # The deviation of a single repeated value is zero.
        for x in (7.2, 49, 8.1e15, Fraction(3, 7), Decimal('62.4802')):
            for count in (2, 3, 5, 15):
                data = [x]*count
                self.assertEqual(self.func(data), 0)
    def test_domain_error_regression(self):
        # Regression test for a domain error exception.
        # (Thanks to Geremy Condra.)
        data = [0.123456789012345]*10000
        # All the items are identical, so variance should be exactly zero.
        # We allow some small round-off error, but not much.
        result = self.func(data)
        self.assertApproxEqual(result, 0.0, tol=5e-17)
        self.assertGreaterEqual(result, 0)  # A negative result must fail.
    def test_shift_data(self):
        # Test that shifting the data by a constant amount does not affect
        # the variance or stdev. Or at least not much.
        # Due to rounding, this test should be considered an ideal. We allow
        # some tolerance away from "no change at all" by setting tol and/or rel
        # attributes. Subclasses may set tighter or looser error tolerances.
        raw = [1.03, 1.27, 1.94, 2.04, 2.58, 3.14, 4.75, 4.98, 5.42, 6.78]
        expected = self.func(raw)
        # Don't set shift too high, the bigger it is, the more rounding error.
        shift = 1e5
        data = [x + shift for x in raw]
        self.assertApproxEqual(self.func(data), expected)
    def test_shift_data_exact(self):
        # Like test_shift_data, but result is always exact.
        # (Integer data shifted by an integer loses no precision.)
        raw = [1, 3, 3, 4, 5, 7, 9, 10, 11, 16]
        assert all(x==int(x) for x in raw)
        expected = self.func(raw)
        shift = 10**9
        data = [x + shift for x in raw]
        self.assertEqual(self.func(data), expected)
    def test_iter_list_same(self):
        # Test that iter data and list data give the same result.
        # This is an explicit test that iterators and lists are treated the
        # same; justification for this test over and above the similar test
        # in UnivariateCommonMixin is that an earlier design had variance and
        # friends swap between one- and two-pass algorithms, which would
        # sometimes give different results.
        data = [random.uniform(-3, 8) for _ in range(1000)]
        expected = self.func(data)
        self.assertEqual(self.func(iter(data)), expected)
class TestPVariance(VarianceStdevMixin, NumericTestCase, UnivariateTypeMixin):
    # Tests for population variance.
    def setUp(self):
        self.func = statistics.pvariance
    def test_exact_uniform(self):
        # Test the variance against an exact result for uniform data:
        # the population variance of {0, 1, ..., N-1} is (N**2 - 1)/12.
        data = list(range(10000))
        random.shuffle(data)
        expected = (10000**2 - 1)/12  # Exact value.
        self.assertEqual(self.func(data), expected)
    def test_ints(self):
        # Test population variance with int data.
        data = [4, 7, 13, 16]
        exact = 22.5
        self.assertEqual(self.func(data), exact)
    def test_fractions(self):
        # Test population variance with Fraction data.
        data = [F(1, 4), F(1, 4), F(3, 4), F(7, 4)]
        exact = F(3, 8)
        result = self.func(data)
        self.assertEqual(result, exact)
        self.assertIsInstance(result, Fraction)
    def test_decimals(self):
        # Test population variance with Decimal data.
        D = Decimal
        data = [D("12.1"), D("12.2"), D("12.5"), D("12.9")]
        exact = D('0.096875')
        result = self.func(data)
        self.assertEqual(result, exact)
        self.assertIsInstance(result, Decimal)
class TestVariance(VarianceStdevMixin, NumericTestCase, UnivariateTypeMixin):
    # Tests for sample variance.
    def setUp(self):
        self.func = statistics.variance
    def test_single_value(self):
        # Override method from VarianceStdevMixin: sample variance of a
        # single value is undefined, so an exception is expected instead
        # of zero.
        for x in (35, 24.7, 8.2e15, Fraction(19, 30), Decimal('4.2084')):
            self.assertRaises(statistics.StatisticsError, self.func, [x])
    def test_ints(self):
        # Test sample variance with int data.
        data = [4, 7, 13, 16]
        exact = 30
        self.assertEqual(self.func(data), exact)
    def test_fractions(self):
        # Test sample variance with Fraction data.
        F = Fraction
        data = [F(1, 4), F(1, 4), F(3, 4), F(7, 4)]
        exact = F(1, 2)
        result = self.func(data)
        self.assertEqual(result, exact)
        self.assertIsInstance(result, Fraction)
    def test_decimals(self):
        # Test sample variance with Decimal data.
        D = Decimal
        data = [D(2), D(2), D(7), D(9)]
        exact = 4*D('9.5')/D(3)
        result = self.func(data)
        self.assertEqual(result, exact)
        self.assertIsInstance(result, Decimal)
class TestPStdev(VarianceStdevMixin, NumericTestCase):
    # Tests for population standard deviation.
    def setUp(self):
        self.func = statistics.pstdev
    def test_compare_to_variance(self):
        # pstdev must be exactly the square root of pvariance.
        sample = [random.uniform(-17, 24) for _ in range(1000)]
        self.assertEqual(self.func(sample),
                         math.sqrt(statistics.pvariance(sample)))
class TestStdev(VarianceStdevMixin, NumericTestCase):
    # Tests for sample standard deviation.
    def setUp(self):
        self.func = statistics.stdev
    def test_single_value(self):
        # Override method from VarianceStdevMixin: sample stdev of a single
        # value is undefined, so an exception is expected instead of zero.
        for single in (81, 203.74, 3.9e14, Fraction(5, 21), Decimal('35.719')):
            self.assertRaises(statistics.StatisticsError, self.func, [single])
    def test_compare_to_variance(self):
        # stdev must be exactly the square root of variance.
        sample = [random.uniform(-2, 9) for _ in range(1000)]
        self.assertEqual(self.func(sample),
                         math.sqrt(statistics.variance(sample)))
# === Run tests ===
def load_tests(loader, tests, ignore):
    """Standard unittest hook: fold this module's doctests into the suite."""
    doctests = doctest.DocTestSuite()
    tests.addTests(doctests)
    return tests
if __name__ == "__main__":
    # Allow running the test module directly from the command line.
    unittest.main()
| mit |
wreckJ/intellij-community | plugins/hg4idea/testData/bin/hgext/inotify/common.py | 94 | 1560 | # server.py - inotify common protocol code
#
# Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
# Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import cStringIO, socket, struct
"""
Protocol between inotify clients and server:
Client sending query:
1) send protocol version number
2) send query type (string, 4 letters long)
3) send query parameters:
- For STAT, N+1 \0-separated strings:
1) N different names that need checking
2) 1 string containing all the status types to match
- No parameter needed for DBUG
Server sending query answer:
1) send protocol version number
2) send query type
3) send struct.pack'ed headers describing the length of the content:
e.g. for STAT, receive 9 integers describing the length of the
9 \0-separated string lists to be read:
* one file list for each lmar!?ic status type
* one list containing the directories visited during lookup
"""
# Protocol version number exchanged by both client and server.
version = 3

# struct format strings for the response header of each query type
# (see the module docstring for the wire protocol).
resphdrfmts = {
    'STAT': '>lllllllll', # status requests
    'DBUG': '>l' # debugging queries
}

# Precomputed size in bytes of each response header format.
# (dict.iteritems: this module targets Python 2.)
resphdrsizes = dict((k, struct.calcsize(v))
                    for k, v in resphdrfmts.iteritems())
def recvcs(sock):
    """Drain *sock* until EOF and return the data as a rewound StringIO.

    The read side of the socket is shut down once the data has been read
    (even if recv raises), and the returned buffer is seeked back to the
    start so callers can parse it immediately.
    """
    cs = cStringIO.StringIO()
    s = True
    try:
        while s:
            # An empty string from recv() signals end-of-stream.
            s = sock.recv(65536)
            cs.write(s)
    finally:
        sock.shutdown(socket.SHUT_RD)
    cs.seek(0)
    return cs
| apache-2.0 |
weisongchen/flaskapp | venv/lib/python2.7/site-packages/pip/exceptions.py | 344 | 8121 | """Exceptions used throughout package"""
from __future__ import absolute_import
from itertools import chain, groupby, repeat
from pip._vendor.six import iteritems
class PipError(Exception):
    """Base class for all exceptions raised by pip."""
class InstallationError(PipError):
    """General exception raised during package installation."""
class UninstallationError(PipError):
    """General exception raised during package uninstallation."""
class DistributionNotFound(InstallationError):
    """Raised when no distribution can be found to satisfy a requirement."""
class RequirementsFileParseError(InstallationError):
    """Raised when a general error occurs while parsing a line of a
    requirements file."""
class BestVersionAlreadyInstalled(PipError):
    """Raised when the most up-to-date version of a package is already
    installed."""
class BadCommand(PipError):
    """Raised when virtualenv or a required external command is not found."""
class CommandError(PipError):
    """Raised when there is an error in the command-line arguments."""
class PreviousBuildDirError(PipError):
    """Raised when a conflicting build directory is left over from a
    previous run."""
class InvalidWheelFilename(InstallationError):
    """Raised when a wheel filename cannot be parsed."""
class UnsupportedWheel(InstallationError):
    """Raised when a wheel is not supported."""
class HashErrors(InstallationError):
    """Multiple HashError instances rolled into one for reporting.

    Truthiness reflects whether any errors have been collected, and
    str() renders all collected errors grouped by class, ordered by each
    class's ``order`` attribute.
    """

    def __init__(self):
        self.errors = []

    def append(self, error):
        """Add one HashError to the collection."""
        self.errors.append(error)

    def __str__(self):
        lines = []
        # Sort so groupby() sees each class contiguously, and so the
        # report order is deterministic (HashError.order).
        self.errors.sort(key=lambda e: e.order)
        for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
            lines.append(cls.head)
            lines.extend(e.body() for e in errors_of_cls)
        # Always return a str: the previous ``if lines:`` guard made this
        # method implicitly return None for an empty collection, which
        # makes str() raise TypeError.
        return '\n'.join(lines)

    def __nonzero__(self):
        # Python 2 truthiness hook.
        return bool(self.errors)

    def __bool__(self):
        # Python 3 truthiness hook; delegate to the Python 2 one.
        return self.__nonzero__()
class HashError(InstallationError):
    """
    A failure to verify a package against known-good hashes

    :cvar order: An int sorting hash exception classes by difficulty of
        recovery (lower being harder), so the user doesn't bother fretting
        about unpinned packages when he has deeper issues, like VCS
        dependencies, to deal with. Also keeps error reports in a
        deterministic order.
    :cvar head: A section heading for display above potentially many
        exceptions of this kind
    :ivar req: The InstallRequirement that triggered this error. This is
        pasted on after the exception is instantiated, because it's not
        typically available earlier.
    """
    # Class-level defaults; subclasses override ``head`` and ``order``,
    # and ``req`` is assigned on instances after construction.
    req = None
    head = ''
    def body(self):
        """Return a summary of me for display under the heading.

        This default implementation simply prints a description of the
        triggering requirement.

        :param req: The InstallRequirement that provoked this error, with
            populate_link() having already been called
        """
        return '    %s' % self._requirement_name()
    def __str__(self):
        return '%s\n%s' % (self.head, self.body())
    def _requirement_name(self):
        """Return a description of the requirement that triggered me.

        This default implementation returns long description of the req, with
        line numbers
        """
        return str(self.req) if self.req else 'unknown package'
class VcsHashUnsupported(HashError):
    """A hash was provided for a version-control-system-based requirement, but
    we don't have a method for hashing those."""
    order = 0  # Hardest to recover from; reported first.
    head = ("Can't verify hashes for these requirements because we don't "
            "have a way to hash version control repositories:")
class DirectoryUrlHashUnsupported(HashError):
    """A hash was provided for a ``file://`` requirement that points to a
    directory, and we don't have a method for hashing directories.

    (Docstring fixed: it was a copy-paste of VcsHashUnsupported's, but this
    class is about directory URLs, as its ``head`` message shows.)
    """
    order = 1
    head = ("Can't verify hashes for these file:// requirements because they "
            "point to directories:")
class HashMissing(HashError):
    """A hash was needed for a requirement but is absent."""
    # Sort position among HashError subclasses in combined reports.
    order = 2
    head = ('Hashes are required in --require-hashes mode, but they are '
            'missing from some requirements. Here is a list of those '
            'requirements along with the hashes their downloaded archives '
            'actually had. Add lines like these to your requirements files to '
            'prevent tampering. (If you did not enable --require-hashes '
            'manually, note that it turns on automatically when any package '
            'has a hash.)')
    def __init__(self, gotten_hash):
        """
        :param gotten_hash: The hash of the (possibly malicious) archive we
            just downloaded
        """
        self.gotten_hash = gotten_hash
    def body(self):
        """Return a requirements-file-style line with the observed hash."""
        from pip.utils.hashes import FAVORITE_HASH  # Dodge circular import.
        package = None
        if self.req:
            # In the case of URL-based requirements, display the original URL
            # seen in the requirements file rather than the package name,
            # so the output can be directly copied into the requirements file.
            package = (self.req.original_link if self.req.original_link
                       # In case someone feeds something downright stupid
                       # to InstallRequirement's constructor.
                       else getattr(self.req, 'req', None))
        return '    %s --hash=%s:%s' % (package or 'unknown package',
                                        FAVORITE_HASH,
                                        self.gotten_hash)
class HashUnpinned(HashError):
    """A requirement had a hash specified but was not pinned to a specific
    version."""
    order = 3
    head = ('In --require-hashes mode, all requirements must have their '
            'versions pinned with ==. These do not:')
class HashMismatch(HashError):
    """
    Distribution file hash values don't match.

    :ivar package_name: The name of the package that triggered the hash
        mismatch. Feel free to write to this after the exception is raise to
        improve its error message.
    """
    order = 4
    head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
            'FILE. If you have updated the package versions, please update '
            'the hashes. Otherwise, examine the package contents carefully; '
            'someone may have tampered with them.')

    def __init__(self, allowed, gots):
        """
        :param allowed: A dict of algorithm names pointing to lists of allowed
            hex digests
        :param gots: A dict of algorithm names pointing to hashes we
            actually got from the files under suspicion
        """
        self.allowed = allowed
        self.gots = gots

    def body(self):
        return '    %s:\n%s' % (self._requirement_name(),
                                self._hash_comparison())

    def _hash_comparison(self):
        """
        Return a comparison of actual and expected hash values.

        Example::

               Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
                            or 123451234512345123451234512345123451234512345
                   Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
        """
        def hash_then_or(hash_name):
            # For now, all the decent hashes have 6-char names, so we can get
            # away with hard-coding space literals.
            return chain([hash_name], repeat('    or'))

        lines = []
        for hash_name, expecteds in iteritems(self.allowed):
            prefix = hash_then_or(hash_name)
            lines.extend(('        Expected %s %s' % (next(prefix), e))
                         for e in expecteds)
            lines.append('             Got        %s\n' %
                         self.gots[hash_name].hexdigest())
            # (Removed a dead ``prefix = '    or'`` reassignment here:
            # ``prefix`` is rebound at the top of every iteration and is
            # never read after the loop, so the line had no effect.)
        return '\n'.join(lines)
class UnsupportedPythonVersion(InstallationError):
    """Unsupported python version according to Requires-Python package
    metadata."""
| mit |
gusai-francelabs/datafari | debian7/elk/kibana/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 383 | 45223 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
# (All four registries below are populated by _AddTool and the _Same /
# _Renamed / _Moved... helper functions further down.)
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
  """Adds a tool to the four dictionaries used to process settings.

  This only defines the tool. Each setting also needs to be added.

  Args:
    tool: The _Tool object to be added.
  """
  # Register empty per-tool setting tables in the module-level registries;
  # the _Same/_Renamed/... helpers fill them in afterwards.
  _msvs_validators[tool.msvs_name] = {}
  _msbuild_validators[tool.msbuild_name] = {}
  _msvs_to_msbuild_converters[tool.msvs_name] = {}
  _msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
  """Type of settings (Base class).

  Subclasses implement validation and MSVS-to-MSBuild conversion for one
  kind of setting value. The base validators accept everything and the
  base converter is the identity.
  """
  def ValidateMSVS(self, value):
    """Verifies that the value is legal for MSVS.

    Args:
      value: the value to check for this type.

    Raises:
      ValueError if value is not valid for MSVS.
    """
  def ValidateMSBuild(self, value):
    """Verifies that the value is legal for MSBuild.

    Args:
      value: the value to check for this type.

    Raises:
      ValueError if value is not valid for MSBuild.
    """
  def ConvertToMSBuild(self, value):
    """Returns the MSBuild equivalent of the MSVS value given.

    Args:
      value: the MSVS value to convert.

    Returns:
      the MSBuild equivalent.

    Raises:
      ValueError if value is not valid.
    """
    return value
class _String(_Type):
  """A setting that's just a string."""
  def ValidateMSVS(self, value):
    # NOTE: ``basestring`` -- this module targets Python 2.
    if not isinstance(value, basestring):
      raise ValueError('expected string; got %r' % value)
  def ValidateMSBuild(self, value):
    if not isinstance(value, basestring):
      raise ValueError('expected string; got %r' % value)
  def ConvertToMSBuild(self, value):
    # Convert the macros ($(...) expansions); the helper is defined
    # elsewhere in this module.
    return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
  """A settings that's a list of strings."""
  def ValidateMSVS(self, value):
    # A bare string is also accepted (treated as a one-element list).
    if not isinstance(value, basestring) and not isinstance(value, list):
      raise ValueError('expected string list; got %r' % value)
  def ValidateMSBuild(self, value):
    if not isinstance(value, basestring) and not isinstance(value, list):
      raise ValueError('expected string list; got %r' % value)
  def ConvertToMSBuild(self, value):
    # Convert the macros in each element (or in the single string).
    if isinstance(value, list):
      return [ConvertVCMacrosToMSBuild(i) for i in value]
    else:
      return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
  """Boolean settings; the only legal values are 'false' and 'true'."""

  def _Validate(self, value):
    # Same check as comparing against each literal in turn, written as a
    # membership test.
    if value not in ('true', 'false'):
      raise ValueError('expected bool; got %r' % value)

  def ValidateMSVS(self, value):
    self._Validate(value)

  def ValidateMSBuild(self, value):
    self._Validate(value)

  def ConvertToMSBuild(self, value):
    # Booleans pass through unchanged once validated.
    self._Validate(value)
    return value
class _Integer(_Type):
  """Integer settings.

  Args:
    msbuild_base: numeric base used when rendering the MSBuild value;
        10 renders as a plain integer, anything else as 0x%04x hex.
  """

  def __init__(self, msbuild_base=10):
    _Type.__init__(self)
    self._msbuild_base = msbuild_base

  def ValidateMSVS(self, value):
    # Try to convert, this will raise ValueError if invalid.
    self.ConvertToMSBuild(value)

  def ValidateMSBuild(self, value):
    # Try to convert, this will raise ValueError if invalid.
    int(value, self._msbuild_base)

  def ConvertToMSBuild(self, value):
    # A conditional expression replaces the old ``cond and a or b`` idiom,
    # which is fragile (it silently picks ``b`` whenever ``a`` is falsy).
    msbuild_format = '%d' if self._msbuild_base == 10 else '0x%04x'
    return msbuild_format % int(value)
class _Enumeration(_Type):
  """Type of settings that is an enumeration.

  In MSVS, the values are indexes like '0', '1', and '2'.
  MSBuild uses text labels that are more representative, like 'Win32'.

  Constructor args:
    label_list: an array of MSBuild labels that correspond to the MSVS index.
        In the rare cases where MSVS has skipped an index value, None is
        used in the array to indicate the unused spot.
    new: an array of labels that are new to MSBuild.
  """
  def __init__(self, label_list, new=None):
    _Type.__init__(self)
    self._label_list = label_list
    # Set of all labels MSBuild accepts: the converted MSVS labels plus
    # any MSBuild-only additions.
    self._msbuild_values = set(value for value in label_list
                               if value is not None)
    if new is not None:
      self._msbuild_values.update(new)
  def ValidateMSVS(self, value):
    # Try to convert. It will raise an exception if not valid.
    self.ConvertToMSBuild(value)
  def ValidateMSBuild(self, value):
    if value not in self._msbuild_values:
      raise ValueError('unrecognized enumerated value %s' % value)
  def ConvertToMSBuild(self, value):
    # MSVS stores the enumeration as a stringified index into label_list.
    index = int(value)
    if index < 0 or index >= len(self._label_list):
      raise ValueError('index value (%d) not in expected range [0, %d)' %
                       (index, len(self._label_list)))
    label = self._label_list[index]
    if label is None:
      raise ValueError('converted value for %s not specified.' % value)
    return label
# Instantiate the various generic types.
# These shared singletons are referenced by the per-setting definitions
# later in the module.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
  """Defines a setting that has the same name in MSVS and MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  # A rename where both names coincide.
  _Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
  """Defines a setting for which the name has changed.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting.
    msbuild_name: the name of the MSBuild setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    # Closure stored in the converter registry: converts the value and
    # stores it under the MSBuild name for this tool.
    msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
  _msbuild_validators[tool.msbuild_name][msbuild_name] = (
      setting_type.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
  """Defines a setting that moved to another MSBuild tool, keeping its name.

  Convenience wrapper around _MovedAndRenamed with identical MSVS and
  MSBuild setting names.
  """
  _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
                   setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
                     msbuild_settings_name, setting_type):
  """Defines a setting that may have moved to a new section.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_settings_name: the MSVS name of the setting.
    msbuild_tool_name: the name of the MSBuild tool to place the setting under.
    msbuild_settings_name: the MSBuild name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    # Store the converted value under the *destination* tool's section,
    # which may differ from the tool the setting came from.
    tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
    tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_settings_name] = (
      setting_type.ValidateMSVS)
  validator = setting_type.ValidateMSBuild
  _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
  """Defines a setting that is only found in MSVS.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(unused_value, unused_msbuild_settings):
    # Since this is for MSVS only settings, no translation will happen.
    pass

  _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
  """Defines a setting that is only found in MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """
  def _Translate(value, msbuild_settings):
    # Let msbuild-only properties get translated as-is from msvs_settings
    # (no conversion is applied to the value).
    tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
    tool_settings[name] = value

  _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
  """Defines a setting that's handled via a command line option in MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting that if 'true' becomes a flag
    flag: the flag to insert at the end of the AdditionalOptions
  """
  def _Translate(value, msbuild_settings):
    # Only a 'true' value produces the flag; 'false' emits nothing.
    if value == 'true':
      tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
      if 'AdditionalOptions' in tool_settings:
        new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
      else:
        new_flags = flag
      tool_settings['AdditionalOptions'] = new_flags

  _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
  """Registers custom handling for the MSVS GeneratePreprocessedFile setting.

  The single MSVS enumeration value expands into two MSBuild booleans:
    '0'           -> no preprocessing
    '1' (/P)      -> preprocess to file, keep line numbers
    '2' (/EP /P)  -> preprocess to file, suppress line numbers
  """
  # Maps the MSVS value to (PreprocessToFile, PreprocessSuppressLineNumbers).
  expansion = {
      '0': ('false', 'false'),
      '1': ('true', 'false'),   # /P
      '2': ('true', 'true'),    # /EP /P
  }

  def _Translate(value, msbuild_settings):
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    if value not in expansion:
      raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
    to_file, suppress = expansion[value]
    tool_settings['PreprocessToFile'] = to_file
    tool_settings['PreprocessSuppressLineNumbers'] = suppress

  # Create a bogus validator that looks for '0', '1', or '2' (the three
  # enumeration labels are never used, only their count matters).
  _msvs_validators[tool.msvs_name][msvs_name] = (
      _Enumeration(['a', 'b', 'c']).ValidateMSVS)
  msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
  msbuild_tool_validators['PreprocessToFile'] = _boolean.ValidateMSBuild
  msbuild_tool_validators['PreprocessSuppressLineNumbers'] = (
      _boolean.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
# Macros known to carry a built-in trailing slash; a run of slashes written
# immediately after one of them (e.g. "$(IntDir)\\foo") is redundant.
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
# Group 1 captures the macro itself; the following run of forward or back
# slashes is matched outside the group so substitution can discard it.
fix_vc_macro_slashes_regex = re.compile(
    r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)

# Regular expression to detect keys that were generated by exclusion lists
# (such keys carry an "_excluded" suffix); group 1 is the root setting name.
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
  """Verify that 'setting' is valid if it is generated from an exclusion list.

  If the setting appears to be generated from an exclusion list, the root name
  is checked.

  Args:
    setting: A string that is the setting name to validate
    settings: A dictionary where the keys are valid settings
    error_msg: The message to emit in the event of error
    stderr: The stream receiving the error messages.
  """
  # This may be unrecognized because it's an exclusion list. If the
  # setting name has the _excluded suffix, then check the root name.
  unrecognized = True
  m = re.match(_EXCLUDED_SUFFIX_RE, setting)
  if m:
    # Strip the "_excluded" suffix and accept the setting when the root
    # name is known.
    root_setting = m.group(1)
    unrecognized = root_setting not in settings
  if unrecognized:
    # We don't know this setting. Give a warning.
    print >> stderr, error_msg
def FixVCMacroSlashes(s):
  """Collapse redundant slashes that follow certain VC macros.

  These macros are known to have a built-in trailing slash, and many scripts
  hiccup on processing paths with extra slashes in the middle, so any slashes
  written directly after such a macro are removed.  The macro list is
  probably not exhaustive; add as needed.
  """
  # Cheap pre-filter: a string without '$' cannot contain a macro, so the
  # regex pass is skipped entirely.
  return fix_vc_macro_slashes_regex.sub(r'\1', s) if '$' in s else s
def ConvertVCMacrosToMSBuild(s):
  """Convert the MSVS macros found in the string to the MSBuild equivalent.

  This list is probably not exhaustive.  Add as needed.
  """
  if '$' in s:
    # Mapping from MSVS macro spelling to the MSBuild item/property form.
    replace_map = {
        '$(ConfigurationName)': '$(Configuration)',
        '$(InputDir)': '%(RelativeDir)',
        '$(InputExt)': '%(Extension)',
        '$(InputFileName)': '%(Filename)%(Extension)',
        '$(InputName)': '%(Filename)',
        '$(InputPath)': '%(Identity)',
        '$(ParentName)': '$(ProjectFileName)',
        '$(PlatformName)': '$(Platform)',
        '$(SafeInputName)': '%(Filename)',
    }
    # items() instead of iteritems(): behaviorally identical here on
    # Python 2 and also valid on Python 3.
    for old, new in replace_map.items():
      s = s.replace(old, new)
    # Macros such as $(OutDir) already end in a slash; drop doubled ones.
    s = FixVCMacroSlashes(s)
  return s
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
  """Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).

  Args:
    msvs_settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.

  Returns:
    A dictionary of MSBuild settings.  The key is either the MSBuild tool name
    or the empty string (for the global settings).  The values are themselves
    dictionaries of settings and their values.
  """
  msbuild_settings = {}
  for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
    if msvs_tool_name in _msvs_to_msbuild_converters:
      msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
      for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
        if msvs_setting in msvs_tool:
          # Invoke the translation function; it mutates msbuild_settings
          # in place.
          try:
            msvs_tool[msvs_setting](msvs_value, msbuild_settings)
          except ValueError, e:
            print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
                              '%s' % (msvs_tool_name, msvs_setting, e))
        else:
          # Unknown setting; it may still be a valid auto-generated
          # "<name>_excluded" key, which _ValidateExclusionSetting accepts
          # silently (it only warns for truly unknown names).
          _ValidateExclusionSetting(msvs_setting,
                                    msvs_tool,
                                    ('Warning: unrecognized setting %s/%s '
                                     'while converting to MSBuild.' %
                                     (msvs_tool_name, msvs_setting)),
                                    stderr)
    else:
      print >> stderr, ('Warning: unrecognized tool %s while converting to '
                        'MSBuild.' % msvs_tool_name)
  return msbuild_settings
def ValidateMSVSSettings(settings, stderr=sys.stderr):
  """Checks that every setting name in `settings` is a known MSVS setting.

  Args:
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  # Delegate to the shared validator using the MSVS validator table.
  _ValidateSettings(_msvs_validators, settings, stderr=stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
  """Checks that every setting name in `settings` is a known MSBuild setting.

  Args:
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  # Delegate to the shared validator using the MSBuild validator table.
  _ValidateSettings(_msbuild_validators, settings, stderr=stderr)
def _ValidateSettings(validators, settings, stderr):
  """Validates that the settings are valid for MSBuild or MSVS.

  We currently only validate the names of the settings, not their values.

  Args:
    validators: A dictionary of tools and their validators.
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  for tool_name in settings:
    if tool_name in validators:
      tool_validators = validators[tool_name]
      for setting, value in settings[tool_name].iteritems():
        if setting in tool_validators:
          # A validator raises ValueError for a bad value; report it as a
          # warning instead of aborting the whole validation pass.
          try:
            tool_validators[setting](value)
          except ValueError, e:
            print >> stderr, ('Warning: for %s/%s, %s' %
                              (tool_name, setting, e))
        else:
          # Not a known setting; accept "<name>_excluded" keys whose root
          # name is valid, warn otherwise.
          _ValidateExclusionSetting(setting,
                                    tool_validators,
                                    ('Warning: unrecognized setting %s/%s' %
                                     (tool_name, setting)),
                                    stderr)
    else:
      print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MSBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')

# Register each tool so the validator/converter tables have an entry for it.
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}  # global (tool-less) settings
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true', # /clr
'Pure', # /clr:pure
'Safe', # /clr:safe
'OldSyntax'])) # /clr:oldSyntax
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| apache-2.0 |
bratsche/Neutron-Drive | google_appengine/lib/django_1_3/tests/regressiontests/custom_columns_regress/tests.py | 91 | 2919 | from django.test import TestCase
from django.core.exceptions import FieldError
from models import Author, Article
def pks(objects):
    """Return the primary keys of *objects*, enabling list comparison."""
    keys = (item.pk for item in objects)
    return list(keys)
class CustomColumnRegression(TestCase):
    """Regression tests for models whose fields use custom db_column names."""

    def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
        # Helper: assert that calling func raises `exc` with message `msg`.
        # NOTE(review): if func raises nothing this helper silently passes —
        # confirm that is acceptable for these tests.
        try:
            func(*args, **kwargs)
        except Exception, e:
            self.assertEqual(msg, str(e))
            self.assertTrue(isinstance(e, exc), "Expected %s, got %s" % (exc, type(e)))

    def setUp(self):
        # Two authors shared by the tests below.
        self.a1 = Author.objects.create(first_name='John', last_name='Smith')
        self.a2 = Author.objects.create(first_name='Peter', last_name='Jones')
        self.authors = [self.a1, self.a2]

    def test_basic_creation(self):
        # Creating an article and assigning its M2M authors must not raise.
        art = Article(headline='Django lets you build Web apps easily', primary_author=self.a1)
        art.save()
        art.authors = [self.a1, self.a2]

    def test_author_querying(self):
        self.assertQuerysetEqual(
            Author.objects.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )

    def test_author_filtering(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact='John'),
            ['<Author: John Smith>']
        )

    def test_author_get(self):
        self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))

    def test_filter_on_nonexistant_field(self):
        # Filtering on a misspelled field name must raise FieldError and list
        # the valid choices (which reflect the custom column names).
        self.assertRaisesMessage(
            FieldError,
            "Cannot resolve keyword 'firstname' into field. Choices are: Author_ID, article, first_name, last_name, primary_set",
            Author.objects.filter,
            firstname__exact='John'
        )

    def test_author_get_attributes(self):
        # Python attribute access uses the model field names, not the
        # underlying database column names.
        a = Author.objects.get(last_name__exact='Smith')
        self.assertEqual('John', a.first_name)
        self.assertEqual('Smith', a.last_name)
        self.assertRaisesMessage(
            AttributeError,
            "'Author' object has no attribute 'firstname'",
            getattr,
            a, 'firstname'
        )
        self.assertRaisesMessage(
            AttributeError,
            "'Author' object has no attribute 'last'",
            getattr,
            a, 'last'
        )

    def test_m2m_table(self):
        # The custom-named M2M join table works in both directions and
        # supports filtering.
        art = Article.objects.create(headline='Django lets you build Web apps easily', primary_author=self.a1)
        art.authors = self.authors
        self.assertQuerysetEqual(
            art.authors.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )
        self.assertQuerysetEqual(
            self.a1.article_set.all(),
            ['<Article: Django lets you build Web apps easily>']
        )
        self.assertQuerysetEqual(
            art.authors.filter(last_name='Jones'),
            ['<Author: Peter Jones>']
        )
| bsd-3-clause |
amitjamadagni/sympy | sympy/assumptions/ask.py | 1 | 11179 | """Module for querying SymPy objects about assumptions."""
from sympy.core import sympify
from sympy.logic.boolalg import to_cnf, And, Not, Or, Implies, Equivalent
from sympy.logic.inference import satisfiable
from sympy.assumptions.assume import (global_assumptions, Predicate,
AppliedPredicate)
class Q:
    """Supported ask keys."""
    # Scalar predicates (number sets, parity, sign, boundedness).
    antihermitian = Predicate('antihermitian')
    bounded = Predicate('bounded')
    commutative = Predicate('commutative')
    complex = Predicate('complex')
    composite = Predicate('composite')
    even = Predicate('even')
    extended_real = Predicate('extended_real')
    hermitian = Predicate('hermitian')
    imaginary = Predicate('imaginary')
    infinitesimal = Predicate('infinitesimal')
    infinity = Predicate('infinity')
    integer = Predicate('integer')
    irrational = Predicate('irrational')
    rational = Predicate('rational')
    negative = Predicate('negative')
    nonzero = Predicate('nonzero')
    positive = Predicate('positive')
    prime = Predicate('prime')
    real = Predicate('real')
    odd = Predicate('odd')
    # Generic truth predicate used when ask() receives a bare proposition.
    is_true = Predicate('is_true')
    # Matrix predicates.
    symmetric = Predicate('symmetric')
    invertible = Predicate('invertible')
    orthogonal = Predicate('orthogonal')
    positive_definite = Predicate('positive_definite')
    upper_triangular = Predicate('upper_triangular')
    lower_triangular = Predicate('lower_triangular')
    diagonal = Predicate('diagonal')
    triangular = Predicate('triangular')
    unit_triangular = Predicate('unit_triangular')
    fullrank = Predicate('fullrank')
    square = Predicate('square')
def _extract_facts(expr, symbol):
    """
    Helper for ask().

    Extracts the facts relevant to the symbol from an assumption.
    Returns None if there is nothing to extract.
    """
    if not expr.has(symbol):
        return None
    if isinstance(expr, AppliedPredicate):
        # A predicate applied directly to the symbol reduces to the bare
        # predicate; a predicate on any other expression tells us nothing.
        return expr.func if expr.arg == symbol else None
    # Rebuild the boolean combination from whatever the arguments yield.
    extracted = [_extract_facts(arg, symbol) for arg in expr.args]
    return expr.func(*[fact for fact in extracted if fact is not None])
def ask(proposition, assumptions=True, context=global_assumptions):
    """
    Method for inferring properties about objects.

    **Syntax**

        * ask(proposition)

        * ask(proposition, assumptions)

            where ``proposition`` is any boolean expression

    Examples
    ========

    >>> from sympy import ask, Q, pi
    >>> from sympy.abc import x, y
    >>> ask(Q.rational(pi))
    False
    >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y))
    True
    >>> ask(Q.prime(x*y), Q.integer(x) & Q.integer(y))
    False

    **Remarks**
        Relations in assumptions are not implemented (yet), so the following
        will not give a meaningful result.

        >>> ask(Q.positive(x), Q.is_true(x > 0)) # doctest: +SKIP

        It is however a work in progress.

    """
    # Fold the global assumption context into the explicit assumptions.
    assumptions = And(assumptions, And(*context))
    # Normalize: a bare proposition is treated as Q.is_true(proposition).
    if isinstance(proposition, AppliedPredicate):
        key, expr = proposition.func, sympify(proposition.arg)
    else:
        key, expr = Q.is_true, sympify(proposition)

    # direct resolution method, no logic
    res = key(expr)._eval_ask(assumptions)
    if res is not None:
        return res

    if assumptions is True:
        return

    # Keep only the assumptions that actually mention expr.
    local_facts = _extract_facts(assumptions, expr)
    if local_facts is None or local_facts is True:
        return

    # See if there's a straight-forward conclusion we can make for the inference
    if local_facts.is_Atom:
        # Single known fact: look it up in the precomputed implication sets.
        if key in known_facts_dict[local_facts]:
            return True
        if Not(key) in known_facts_dict[local_facts]:
            return False
    elif local_facts.func is And and all(k in known_facts_dict for k in local_facts.args):
        # Conjunction of known facts: any one of them may decide the query.
        for assum in local_facts.args:
            if assum.is_Atom:
                if key in known_facts_dict[assum]:
                    return True
                if Not(key) in known_facts_dict[assum]:
                    return False
            elif assum.func is Not and assum.args[0].is_Atom:
                if key in known_facts_dict[assum]:
                    return False
                if Not(key) in known_facts_dict[assum]:
                    return True
    elif (isinstance(key, Predicate) and
            local_facts.func is Not and local_facts.args[0].is_Atom):
        # A negated known fact can only refute the query, never prove it.
        if local_facts.args[0] in known_facts_dict[key]:
            return False

    # Failing all else, we do a full logical inference
    return ask_full_inference(key, local_facts, known_facts_cnf)
def ask_full_inference(proposition, assumptions, known_facts_cnf):
    """
    Decide *proposition* under *assumptions* by full SAT-based inference.

    Returns True/False when the known facts force an answer, None when the
    assumptions leave the proposition undecided.
    """
    if satisfiable(And(known_facts_cnf, assumptions, proposition)):
        if satisfiable(And(known_facts_cnf, assumptions, Not(proposition))):
            # Both the proposition and its negation are consistent with the
            # facts, so nothing can be concluded.
            return None
        return True
    return False
def register_handler(key, handler):
    """
    Register a handler in the ask system. key must be a string and handler a
    class inheriting from AskHandler::

        >>> from sympy.assumptions import register_handler, ask, Q
        >>> from sympy.assumptions.handlers import AskHandler
        >>> class MersenneHandler(AskHandler):
        ... # Mersenne numbers are in the form 2**n + 1, n integer
        ... @staticmethod
        ... def Integer(expr, assumptions):
        ... import math
        ... return ask(Q.integer(math.log(expr + 1, 2)))
        >>> register_handler('mersenne', MersenneHandler)
        >>> ask(Q.mersenne(7))
        True

    """
    if type(key) is Predicate:
        key = key.name
    try:
        predicate = getattr(Q, key)
    except AttributeError:
        # Unknown key: attach a brand-new predicate carrying the handler.
        setattr(Q, key, Predicate(key, handlers=[handler]))
    else:
        predicate.add_handler(handler)
def remove_handler(key, handler):
    """Removes a handler from the ask system. Same syntax as register_handler"""
    name = key.name if type(key) is Predicate else key
    getattr(Q, name).remove_handler(handler)
def single_fact_lookup(known_facts_keys, known_facts_cnf):
    """Map each fact to the set of facts it implies (itself included)."""
    mapping = {}
    for fact in known_facts_keys:
        implied = set([fact])
        for candidate in known_facts_keys:
            if candidate != fact and \
                    ask_full_inference(candidate, fact, known_facts_cnf):
                implied.add(candidate)
        mapping[fact] = implied
    return mapping
def compute_known_facts(known_facts, known_facts_keys):
    """Compute the various forms of knowledge compilation used by the
    assumptions system.

    This function is typically applied to the variables
    ``known_facts`` and ``known_facts_keys`` defined at the bottom of
    this file.
    """
    from textwrap import dedent, wrap

    # Template of the generated ask_generated.py module; the two %s slots
    # receive the CNF clause list and the implication dictionary.
    fact_string = dedent('''\
    """
    The contents of this file are the return value of
    ``sympy.assumptions.ask.compute_known_facts``. Do NOT manually
    edit this file.
    """

    from sympy.logic.boolalg import And, Not, Or
    from sympy.assumptions.ask import Q

    # -{ Known facts in CNF }-
    known_facts_cnf = And(
        %s
    )

    # -{ Known facts in compressed sets }-
    known_facts_dict = {
        %s
    }
    ''')
    # Compute the known facts in CNF form for logical inference
    LINE = ",\n "
    HANG = ' '*8
    cnf = to_cnf(known_facts)
    c = LINE.join([str(a) for a in cnf.args])
    # Quick-lookup dict: one wrapped "key: implied-set" entry per fact.
    mapping = single_fact_lookup(known_facts_keys, cnf)
    m = LINE.join(['\n'.join(
        wrap("%s: %s" % item,
            subsequent_indent=HANG,
            break_long_words=False))
        for item in mapping.items()]) + ','
    return fact_string % (c, m)
# handlers_dict tells us what ask handler we should use
# for a particular key
_val_template = 'sympy.assumptions.handlers.%s'
_handlers = [
    ("antihermitian", "sets.AskAntiHermitianHandler"),
    ("bounded", "calculus.AskBoundedHandler"),
    ("commutative", "AskCommutativeHandler"),
    ("complex", "sets.AskComplexHandler"),
    ("composite", "ntheory.AskCompositeHandler"),
    ("even", "ntheory.AskEvenHandler"),
    ("extended_real", "sets.AskExtendedRealHandler"),
    ("hermitian", "sets.AskHermitianHandler"),
    ("imaginary", "sets.AskImaginaryHandler"),
    ("infinitesimal", "calculus.AskInfinitesimalHandler"),
    ("integer", "sets.AskIntegerHandler"),
    ("irrational", "sets.AskIrrationalHandler"),
    ("rational", "sets.AskRationalHandler"),
    ("negative", "order.AskNegativeHandler"),
    ("nonzero", "order.AskNonZeroHandler"),
    ("positive", "order.AskPositiveHandler"),
    ("prime", "ntheory.AskPrimeHandler"),
    ("real", "sets.AskRealHandler"),
    ("odd", "ntheory.AskOddHandler"),
    ("algebraic", "sets.AskAlgebraicHandler"),
    ("is_true", "TautologicalHandler"),
    ("symmetric", "matrices.AskSymmetricHandler"),
    ("invertible", "matrices.AskInvertibleHandler"),
    ("orthogonal", "matrices.AskOrthogonalHandler"),
    ("positive_definite", "matrices.AskPositiveDefiniteHandler"),
    ("upper_triangular", "matrices.AskUpperTriangularHandler"),
    ("lower_triangular", "matrices.AskLowerTriangularHandler"),
    ("diagonal", "matrices.AskDiagonalHandler"),
    ("fullrank", "matrices.AskFullRankHandler"),
    ("square", "matrices.AskSquareHandler"),
]
# Attach each handler to its predicate on Q at import time.  Note that
# "algebraic" is not declared on Q above, so register_handler creates it here.
for name, value in _handlers:
    register_handler(name, _val_template % value)

# Every predicate currently declared on Q participates in the fact closure.
known_facts_keys = [getattr(Q, attr) for attr in Q.__dict__
                    if not attr.startswith('__')]
# Axioms of the assumption system, compiled to CNF by compute_known_facts.
known_facts = And(
    Implies(Q.real, Q.complex),
    Implies(Q.real, Q.hermitian),
    Equivalent(Q.even, Q.integer & ~Q.odd),
    Equivalent(Q.extended_real, Q.real | Q.infinity),
    Equivalent(Q.odd, Q.integer & ~Q.even),
    Equivalent(Q.prime, Q.integer & Q.positive & ~Q.composite),
    Implies(Q.integer, Q.rational),
    Implies(Q.imaginary, Q.complex & ~Q.real),
    Implies(Q.imaginary, Q.antihermitian),
    Implies(Q.antihermitian, ~Q.hermitian),
    Equivalent(Q.negative, Q.nonzero & ~Q.positive),
    Equivalent(Q.positive, Q.nonzero & ~Q.negative),
    Equivalent(Q.rational, Q.real & ~Q.irrational),
    Equivalent(Q.real, Q.rational | Q.irrational),
    Implies(Q.nonzero, Q.real),
    Equivalent(Q.nonzero, Q.positive | Q.negative),
    Implies(Q.orthogonal, Q.positive_definite),
    Implies(Q.positive_definite, Q.invertible),
    Implies(Q.diagonal, Q.upper_triangular),
    Implies(Q.diagonal, Q.lower_triangular),
    Implies(Q.lower_triangular, Q.triangular),
    Implies(Q.upper_triangular, Q.triangular),
    Implies(Q.triangular, Q.upper_triangular | Q.lower_triangular),
    Implies(Q.upper_triangular & Q.lower_triangular, Q.diagonal),
    Implies(Q.diagonal, Q.symmetric),
    Implies(Q.unit_triangular, Q.triangular),
    Implies(Q.invertible, Q.fullrank),
    Implies(Q.invertible, Q.square),
    Implies(Q.symmetric, Q.square),
    Implies(Q.fullrank & Q.square, Q.invertible),
)
from sympy.assumptions.ask_generated import known_facts_dict, known_facts_cnf
| bsd-3-clause |
kderynski/acos-client | tests/unit/test_member.py | 4 | 2240 | # Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2 as unittest
import acos_client.errors as acos_errors
import v21_mocks as mocks
class TestMember(unittest.TestCase):
    """Service-group member CRUD tests against mocked v2.1 AXAPI responses."""

    def test_member_delete(self):
        with mocks.MemberDelete().client() as c:
            c.slb.service_group.member.delete('pool1', 's1', 80)

    def test_member_delete_not_found(self):
        # Deleting a missing member must not raise (idempotent delete).
        with mocks.MemberDeleteNotFound().client() as c:
            c.slb.service_group.member.delete('pool1', 's1', 80)

    def test_member_create(self):
        with mocks.MemberCreate().client() as c:
            c.slb.service_group.member.create('pool1', 's1', 80)

    def test_member_create_exists(self):
        # Creating a duplicate member surfaces acos_errors.Exists.
        with mocks.MemberCreateExists().client() as c:
            with self.assertRaises(acos_errors.Exists):
                c.slb.service_group.member.create('pool1', 's1', 80)

    def test_member_update(self):
        with mocks.MemberUpdate().client() as c:
            c.slb.service_group.member.update('pool1', 's1', 80,
                                              c.slb.DOWN)

    def test_member_update_not_found(self):
        # Updating a missing member surfaces acos_errors.NotFound.
        with mocks.MemberUpdateNotFound().client() as c:
            with self.assertRaises(acos_errors.NotFound):
                c.slb.service_group.member.update(
                    'pool1', 's1', 80,
                    c.slb.DOWN)

    def test_member_update_no_such_service_group(self):
        # A missing parent service group also maps to NotFound.
        with mocks.MemberUpdateNoSuchServiceGroup().client() as c:
            with self.assertRaises(acos_errors.NotFound):
                c.slb.service_group.member.update(
                    'pool1', 's1', 80,
                    c.slb.DOWN)
| apache-2.0 |
jank3/django | django/template/backends/django.py | 240 | 5574 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import warnings
from importlib import import_module
from pkgutil import walk_packages
from django.apps import apps
from django.conf import settings
from django.template import TemplateDoesNotExist
from django.template.context import Context, RequestContext, make_context
from django.template.engine import Engine, _dirs_undefined
from django.template.library import InvalidTemplateLibrary
from django.utils import six
from django.utils.deprecation import RemovedInDjango110Warning
from .base import BaseEngine
class DjangoTemplates(BaseEngine):
    """Template backend wrapping the classic Django template Engine."""

    app_dirname = 'templates'

    def __init__(self, params):
        """Build an Engine from one TEMPLATES-setting dict *params*."""
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        # Fall back to the global settings when not configured explicitly.
        options.setdefault('debug', settings.DEBUG)
        options.setdefault('file_charset', settings.FILE_CHARSET)
        options['libraries'] = self.get_templatetag_libraries(
            options.get('libraries', {}))
        super(DjangoTemplates, self).__init__(params)
        self.engine = Engine(self.dirs, self.app_dirs, **options)

    def from_string(self, template_code):
        return Template(self.engine.from_string(template_code), self)

    def get_template(self, template_name, dirs=_dirs_undefined):
        try:
            template = self.engine.get_template(template_name, dirs)
        except TemplateDoesNotExist as exc:
            # Re-raise with backend attached so debug info survives.
            reraise(exc, self)
        else:
            return Template(template, self)

    def get_templatetag_libraries(self, custom_libraries):
        """
        Return a collation of template tag libraries from installed
        applications and the supplied custom_libraries argument.
        """
        available = get_installed_libraries()
        # Custom libraries win over same-named installed ones.
        available.update(custom_libraries)
        return available
class Template(object):
    """Backend-API adapter around a django.template.Template instance."""

    def __init__(self, template, backend):
        self.template = template
        self.backend = backend

    @property
    def origin(self):
        # Expose the wrapped template's origin for debugging tools.
        return self.template.origin

    def render(self, context=None, request=None):
        """Render with a plain dict (preferred) or a legacy (Request)Context."""
        # A deprecation path is required here to cover the following usage:
        # >>> from django.template import Context
        # >>> from django.template.loader import get_template
        # >>> template = get_template('hello.html')
        # >>> template.render(Context({'name': 'world'}))
        # In Django 1.7 get_template() returned a django.template.Template.
        # In Django 1.8 it returns a django.template.backends.django.Template.
        # In Django 1.10 the isinstance checks should be removed. If passing a
        # Context or a RequestContext works by accident, it won't be an issue
        # per se, but it won't be officially supported either.
        if isinstance(context, RequestContext):
            if request is not None and request is not context.request:
                raise ValueError(
                    "render() was called with a RequestContext and a request "
                    "argument which refer to different requests. Make sure "
                    "that the context argument is a dict or at least that "
                    "the two arguments refer to the same request.")
            warnings.warn(
                "render() must be called with a dict, not a RequestContext.",
                RemovedInDjango110Warning, stacklevel=2)

        elif isinstance(context, Context):
            warnings.warn(
                "render() must be called with a dict, not a Context.",
                RemovedInDjango110Warning, stacklevel=2)

        else:
            # Normal path: wrap the dict (or None) in a fresh Context.
            context = make_context(context, request)

        try:
            return self.template.render(context)
        except TemplateDoesNotExist as exc:
            reraise(exc, self.backend)
def reraise(exc, backend):
    """
    Reraise TemplateDoesNotExist while maintaining template debug information.
    """
    _missing = object()
    wrapped = exc.__class__(*exc.args, tried=exc.tried, backend=backend)
    # Carry over template_debug only when the original exception has it.
    debug = getattr(exc, 'template_debug', _missing)
    if debug is not _missing:
        wrapped.template_debug = debug
    six.reraise(exc.__class__, wrapped, sys.exc_info()[2])
def get_installed_libraries():
    """
    Return the built-in template tag libraries and those from installed
    applications. Libraries are stored in a dictionary where keys are the
    individual module names, not the full module paths. Example:
    django.templatetags.i18n is stored as i18n.
    """
    libraries = {}
    candidates = ['django.templatetags']
    candidates += [
        '%s.templatetags' % app_config.name
        for app_config in apps.get_app_configs()
    ]

    for candidate in candidates:
        try:
            pkg = import_module(candidate)
        except ImportError:
            # No templatetags package defined. This is safe to ignore.
            continue

        if hasattr(pkg, '__path__'):
            prefix = len(candidate) + 1
            for name in get_package_libraries(pkg):
                # Strip "<candidate>." so only the short name is the key.
                libraries[name[prefix:]] = name

    return libraries
def get_package_libraries(pkg):
    """
    Recursively yield template tag libraries defined in submodules of a
    package.
    """
    for _, module_name, _ in walk_packages(pkg.__path__, pkg.__name__ + '.'):
        try:
            module = import_module(module_name)
        except ImportError as e:
            raise InvalidTemplateLibrary(
                "Invalid template library specified. ImportError raised when "
                "trying to load '%s': %s" % (module_name, e)
            )

        # Only modules exposing a `register` attribute are tag libraries.
        if hasattr(module, 'register'):
            yield module_name
| bsd-3-clause |
ironchicken/pycoon | src/pycoon/serializers/html_serializer.py | 1 | 2383 | """
Copyright (C) Richard Lewis 2006
This software is licensed under the terms of the GNU GPL.
"""
from pycoon.serializers import serializer, SerializerError
from pycoon.components import invokation_syntax
from pycoon.helpers import correct_script_chars
from lxml.etree import tounicode
import os
try:
import tidy
UTIDYLIB_AVAIL = True
except ImportError:
UTIDYLIB_AVAIL = False
def register_invokation_syntax(server):
    """
    Allows the component to register the required XML element syntax for its invokation
    in sitemap files with the sitemap_config_parse class.
    """
    invk_syn = invokation_syntax()
    # <serialize type="html" [mime="..."]/> with no child elements, valid
    # directly under pipelines and matcher/selector branches.
    invk_syn.element_name = "serialize"
    invk_syn.allowed_parent_components = ["pipeline", "match", "when", "otherwise"]
    invk_syn.required_attribs = ["type"]
    invk_syn.required_attrib_values = {"type": "html"}
    invk_syn.optional_attribs = ["mime"]
    invk_syn.allowed_child_components = []

    # Registry key is the (element name, type attribute) pair.
    server.component_syntaxes[("serialize", "html")] = invk_syn
    return invk_syn
class html_serializer(serializer):
    """
    html_serializer class encapsulates the uTidyLib class.
    """

    def __init__(self, parent, mime="text/html", root_path=""):
        """
        html_serializer constructor.

        @mime: MIME type reported with the serialized output.
        """
        serializer.__init__(self, parent, root_path)
        self.mime_str = mime
        self.description = "html_serializer()"

    def _descend(self, req, p_sibling_result=None):
        # Serializers are pipeline leaves: never descend into children.
        return False

    def _result(self, req, p_sibling_result=None, child_results=[]):
        """
        Executes tidy.parseString on the p_sibling_result and returns the resultant HTML.
        """
        # NOTE(review): mutable default `child_results=[]` is shared across
        # calls; harmless here because it is never mutated, but confirm.
        try:
            if os.name != "nt" and UTIDYLIB_AVAIL:
                # Clean the serialized tree through HTML Tidy (non-Windows only).
                options = dict(output_html=1, add_xml_decl=1, doctype="strict", indent=1, wrap=120, tidy_mark=0,\
                               input_encoding="utf8", output_encoding="utf8")
                return (True, (correct_script_chars(str(tidy.parseString(tounicode(p_sibling_result).encode("utf-8"), **options))), self.mime_str))
            else:
                # Fall back to plain lxml serialization when Tidy is unavailable.
                return (True, (correct_script_chars(tounicode(p_sibling_result).encode("utf-8")), self.mime_str))
        except TypeError:
            # tounicode(None) raises TypeError: translate to a pipeline error.
            if p_sibling_result is None:
                raise SerializerError("html_serializer: preceding pipeline components have returned no content!")
| gpl-2.0 |
kennethreitz/httpbin | test_httpbin.py | 3 | 32179 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import base64
import unittest
import contextlib
import six
import json
from werkzeug.http import parse_dict_header
from hashlib import md5, sha256, sha512
from six import BytesIO
import httpbin
from httpbin.helpers import parse_multi_value_header
@contextlib.contextmanager
def _setenv(key, value):
"""Context manager to set an environment variable temporarily."""
old_value = os.environ.get(key, None)
if value is None:
os.environ.pop(key, None)
else:
os.environ[key] = value
yield
if old_value is None:
os.environ.pop(key, None)
else:
os.environ[key] = value
def _string_to_base64(string):
"""Encodes string to utf-8 and then base64"""
utf8_encoded = string.encode('utf-8')
return base64.urlsafe_b64encode(utf8_encoded)
def _hash(data, algorithm):
"""Encode binary data according to specified algorithm, use MD5 by default"""
if algorithm == 'SHA-256':
return sha256(data).hexdigest()
elif algorithm == 'SHA-512':
return sha512(data).hexdigest()
else:
return md5(data).hexdigest()
def _make_digest_auth_header(username, password, method, uri, nonce,
                             realm=None, opaque=None, algorithm=None,
                             qop=None, cnonce=None, nc=None, body=None):
    """Compile a digest authentication header string.

    Arguments:
    - `nonce`: nonce string, received within "WWW-Authenticate" header
    - `realm`: realm string, received within "WWW-Authenticate" header
    - `opaque`: opaque string, received within "WWW-Authenticate" header
    - `algorithm`: type of hashing algorithm, used by the client
    - `qop`: type of quality-of-protection, used by the client
    - `cnonce`: client nonce, required if qop is "auth" or "auth-int"
    - `nc`: client nonce count, required if qop is "auth" or "auth-int"
    - `body`: body of the outgoing request (bytes), used if qop is "auth-int"
    """
    assert username
    assert password
    assert nonce
    assert method
    assert uri
    assert algorithm in ('MD5', 'SHA-256', 'SHA-512', None)

    # HA1 covers the credentials; HA2 covers the request line (RFC 2617).
    ha1 = _hash(':'.join([username, realm or '', password]).encode('utf-8'),
                algorithm)

    request_part = ':'.join([method, uri])
    if qop == 'auth-int':
        # auth-int additionally protects the request body.
        request_part = ':'.join([request_part, _hash(body or b'', algorithm)])
    ha2 = _hash(request_part.encode('utf-8'), algorithm)

    response_data = [ha1, nonce]
    if qop in ('auth', 'auth-int'):
        assert cnonce
        assert nc
        response_data += [nc, cnonce, qop]
    response_data.append(ha2)
    auth_response = _hash(':'.join(response_data).encode('utf-8'), algorithm)

    fields = [
        'Digest username="{0}"'.format(username),
        'response="{0}"'.format(auth_response),
        'uri="{0}"'.format(uri),
        'nonce="{0}"'.format(nonce),
    ]
    # 'realm' and 'opaque' should be returned unchanged, even if empty
    if realm is not None:
        fields.append('realm="{0}"'.format(realm))
    if opaque is not None:
        fields.append('opaque="{0}"'.format(opaque))
    if algorithm:
        fields.append('algorithm="{0}"'.format(algorithm))
    if cnonce:
        fields.append('cnonce="{0}"'.format(cnonce))
    if nc:
        fields.append('nc={0}'.format(nc))
    if qop:
        fields.append('qop={0}'.format(qop))
    return ', '.join(fields)
class HttpbinTestCase(unittest.TestCase):
    """Httpbin tests"""

    def setUp(self):
        # Debug mode makes Flask re-raise errors instead of returning 500s.
        httpbin.app.debug = True
        self.app = httpbin.app.test_client()

    def test_index(self):
        response = self.app.get('/', headers={'User-Agent': 'test'})
        self.assertEqual(response.status_code, 200)

    def get_data(self, response):
        # Werkzeug renamed .data to .get_data(); support both client versions.
        if 'get_data' in dir(response):
            return response.get_data()
        else:
            return response.data
    def test_response_headers_simple(self):
        # /response-headers echoes query params as response headers and in
        # the JSON body, for both GET and POST.
        supported_verbs = ['get', 'post']
        for verb in supported_verbs:
            method = getattr(self.app, verb)
            response = method('/response-headers?animal=dog')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.headers.get_all('animal'), ['dog'])
            assert json.loads(response.data.decode('utf-8'))['animal'] == 'dog'

    def test_response_headers_multi(self):
        # Repeated query params become a multi-valued header and a JSON list.
        supported_verbs = ['get', 'post']
        for verb in supported_verbs:
            method = getattr(self.app, verb)
            response = method('/response-headers?animal=dog&animal=cat')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.headers.get_all('animal'), ['dog', 'cat'])
            assert json.loads(response.data.decode('utf-8'))['animal'] == ['dog', 'cat']

    def test_get(self):
        response = self.app.get('/get', headers={'User-Agent': 'test'})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data.decode('utf-8'))
        self.assertEqual(data['args'], {})
        self.assertEqual(data['headers']['Host'], 'localhost')
        self.assertEqual(data['headers']['Content-Length'], '0')
        self.assertEqual(data['headers']['User-Agent'], 'test')
        # self.assertEqual(data['origin'], None)
        self.assertEqual(data['url'], 'http://localhost/get')
        # Responses are newline-terminated JSON.
        self.assertTrue(response.data.endswith(b'\n'))
    def test_anything(self):
        response = self.app.get('/anything')
        self.assertEqual(response.status_code, 200)
        # /anything accepts arbitrary sub-paths and echoes the request back.
        response = self.app.get('/anything/foo/bar')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.data.decode('utf-8'))
        self.assertEqual(data['args'], {})
        self.assertEqual(data['headers']['Host'], 'localhost')
        self.assertEqual(data['headers']['Content-Length'], '0')
        self.assertEqual(data['url'], 'http://localhost/anything/foo/bar')
        self.assertEqual(data['method'], 'GET')
        self.assertTrue(response.data.endswith(b'\n'))

    def test_base64(self):
        # Round-trip: base64-of-UTF-8 in the URL must decode back to the
        # original unicode text.
        greeting = u'Здравствуй, мир!'
        b64_encoded = _string_to_base64(greeting)
        response = self.app.get(b'/base64/' + b64_encoded)
        content = response.data.decode('utf-8')
        self.assertEqual(greeting, content)

    def test_post_binary(self):
        response = self.app.post('/post',
                                 data=b'\x01\x02\x03\x81\x82\x83',
                                 content_type='application/octet-stream')
        self.assertEqual(response.status_code, 200)

    def test_post_body_text(self):
        # Use this test module's own source tree as a handy large text file.
        with open('httpbin/core.py') as f:
            response = self.app.post('/post', data={"file": f.read()})
        self.assertEqual(response.status_code, 200)

    def test_post_body_binary(self):
        response = self.app.post(
            '/post',
            data={"file": b'\x01\x02\x03\x81\x82\x83'})
        self.assertEqual(response.status_code, 200)

    def test_post_body_unicode(self):
        # Raw UTF-8 bodies must be decoded and echoed as unicode.
        response = self.app.post('/post', data=u'оживлённым'.encode('utf-8'))
        self.assertEqual(json.loads(response.data.decode('utf-8'))['data'], u'оживлённым')

    def test_post_file_with_missing_content_type_header(self):
        # I built up the form data manually here because I couldn't find a way
        # to convince the werkzeug test client to send files without the
        # content-type of the file set.
        data = '--bound\r\nContent-Disposition: form-data; name="media"; '
        data += 'filename="test.bin"\r\n\r\n\xa5\xc6\n--bound--\r\n'
        response = self.app.post(
            '/post',
            content_type='multipart/form-data; boundary=bound',
            data=data,
        )
        self.assertEqual(response.status_code, 200)

    """
    This is currently a sort of negative-test.
    We validate that when running Flask-only server that
    Transfer-Encoding: chunked requests are unsupported and
    we return 501 Not Implemented
    """
    def test_post_chunked(self):
        data = '{"animal":"dog"}'
        response = self.app.post(
            '/post',
            content_type='application/json',
            headers=[('Transfer-Encoding', 'chunked')],
            data=data,
        )
        self.assertEqual(response.status_code, 501)
        #self.assertEqual(response.status_code, 200)
        #self.assertEqual(json.loads(response.data.decode('utf-8'))['data'], '{"animal":"dog"}')
        #self.assertEqual(json.loads(response.data.decode('utf-8'))['json'], {"animal": "dog"})
    def test_set_cors_headers_after_request(self):
        response = self.app.get('/get')
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Origin'), '*'
        )

    def test_set_cors_credentials_headers_after_auth_request(self):
        response = self.app.get('/basic-auth/foo/bar')
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Credentials'), 'true'
        )

    def test_set_cors_headers_after_request_with_request_origin(self):
        # When the client sends an Origin header it is echoed back verbatim
        # instead of the wildcard.
        response = self.app.get('/get', headers={'Origin': 'origin'})
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Origin'), 'origin'
        )

    def test_set_cors_headers_with_options_verb(self):
        # Preflight OPTIONS must advertise the full CORS policy.
        response = self.app.open('/get', method='OPTIONS')
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Origin'), '*'
        )
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Credentials'), 'true'
        )
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Methods'),
            'GET, POST, PUT, DELETE, PATCH, OPTIONS'
        )
        self.assertEqual(
            response.headers.get('Access-Control-Max-Age'), '3600'
        )
        # FIXME should we add any extra headers?
        self.assertNotIn(
            'Access-Control-Allow-Headers', response.headers
        )

    def test_set_cors_allow_headers(self):
        # Requested headers are echoed back in Allow-Headers.
        response = self.app.open('/get', method='OPTIONS', headers={'Access-Control-Request-Headers': 'X-Test-Header'})
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Headers'), 'X-Test-Header'
        )
def test_headers(self):
headers = {
"Accept": "*/*",
"Host": "localhost:1234",
"User-Agent": "curl/7.54.0",
"Via": "bar"
}
response = self.app.get('/headers', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertTrue({'Accept', 'Host', 'User-Agent'}.issubset(set(response.json['headers'].keys())))
self.assertNotIn('Via', response.json)
    def test_headers_show_env(self):
        headers = {
            "Accept": "*/*",
            "Host": "localhost:1234",
            "User-Agent": "curl/7.54.0",
            "Via": "bar"
        }
        # With show_env=true, environment headers such as Via are exposed too.
        response = self.app.get('/headers?show_env=true', headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertTrue({'Accept', 'Host', 'User-Agent', 'Via'}.issubset(set(response.json['headers'].keys())))

    def test_user_agent(self):
        response = self.app.get(
            '/user-agent', headers={'User-Agent': 'test'}
        )
        self.assertIn('test', response.data.decode('utf-8'))
        self.assertEqual(response.status_code, 200)

    def test_gzip(self):
        response = self.app.get('/gzip')
        self.assertEqual(response.status_code, 200)

    def test_brotli(self):
        response = self.app.get('/brotli')
        self.assertEqual(response.status_code, 200)

    def test_bearer_auth(self):
        # A well-formed Bearer token is accepted and echoed back.
        token = 'abcd1234'
        response = self.app.get(
            '/bearer',
            headers={'Authorization': 'Bearer ' + token}
        )
        self.assertEqual(response.status_code, 200)
        assert json.loads(response.data.decode('utf-8'))['token'] == token

    def test_bearer_auth_with_wrong_authorization_type(self):
        """Sending an non-Bearer Authorization header to /bearer should return a 401"""
        auth_headers = (
            ('Authorization', 'Basic 1234abcd'),
            ('Authorization', ''),
            ('', '')
        )
        for header in auth_headers:
            response = self.app.get(
                '/bearer',
                headers={header[0]: header[1]}
            )
            self.assertEqual(response.status_code, 401)

    def test_bearer_auth_with_missing_token(self):
        """Sending an 'Authorization: Bearer' header with no token to /bearer should return a 401"""
        response = self.app.get(
            '/bearer',
            headers={'Authorization': 'Bearer'}
        )
        self.assertEqual(response.status_code, 401)
    def test_digest_auth_with_wrong_password(self):
        # A syntactically valid Digest header with a bogus response digest
        # must be rejected with a fresh challenge.
        auth_header = 'Digest username="user",realm="wrong",nonce="wrong",uri="/digest-auth/user/passwd/MD5",response="wrong",opaque="wrong"'
        response = self.app.get(
            '/digest-auth/auth/user/passwd/MD5',
            environ_base={
                # httpbin's digest auth implementation uses the remote addr to
                # build the nonce
                'REMOTE_ADDR': '127.0.0.1',
            },
            headers={
                'Authorization': auth_header,
            }
        )
        self.assertTrue('Digest' in response.headers.get('WWW-Authenticate'))
        self.assertEqual(response.status_code, 401)

    def test_digest_auth(self):
        """Test different combinations of digest auth parameters"""
        username = 'user'
        password = 'passwd'
        # Exercise the full matrix of qop / algorithm / body / staleness.
        for qop in None, 'auth', 'auth-int',:
            for algorithm in None, 'MD5', 'SHA-256', 'SHA-512':
                for body in None, b'', b'request payload':
                    for stale_after in (None, 1, 4) if algorithm else (None,) :
                        self._test_digest_auth(username, password, qop, algorithm, body, stale_after)

    def test_digest_auth_with_wrong_authorization_type(self):
        """Sending an non-digest Authorization header to /digest-auth should return a 401"""
        auth_headers = (
            ('Authorization', 'Basic 1234abcd'),
            ('Authorization', ''),
            ('', '')
        )
        for header in auth_headers:
            response = self.app.get(
                '/digest-auth/auth/myname/mysecret',
                headers={header[0]: header[1]}
            )
            self.assertEqual(response.status_code, 401)
def _test_digest_auth(self, username, password, qop, algorithm=None, body=None, stale_after=None):
uri = self._digest_auth_create_uri(username, password, qop, algorithm, stale_after)
unauthorized_response = self._test_digest_auth_first_challenge(uri)
header = unauthorized_response.headers.get('WWW-Authenticate')
authorized_response, nonce = self._test_digest_response_for_auth_request(header, username, password, qop, uri, body)
self.assertEqual(authorized_response.status_code, 200)
if None == stale_after :
return
# test stale after scenerio
self._digest_auth_stale_after_check(header, username, password, uri, body, qop, stale_after)
def _test_digest_auth_first_challenge(self, uri):
unauthorized_response = self.app.get(
uri,
environ_base={
# digest auth uses the remote addr to build the nonce
'REMOTE_ADDR': '127.0.0.1',
}
)
# make sure it returns a 401
self.assertEqual(unauthorized_response.status_code, 401)
return unauthorized_response
def _digest_auth_create_uri(self, username, password, qop, algorithm, stale_after):
uri = '/digest-auth/{0}/{1}/{2}'.format(qop or 'wrong-qop', username, password)
if algorithm:
uri += '/' + algorithm
if stale_after:
uri += '/{0}'.format(stale_after)
return uri
def _digest_auth_stale_after_check(self, header, username, password, uri, body, qop, stale_after):
for nc in range(2, stale_after + 1):
authorized_response, nonce = self._test_digest_response_for_auth_request(header, username, password, qop, uri, \
body, nc)
self.assertEqual(authorized_response.status_code, 200)
stale_response, nonce = self._test_digest_response_for_auth_request(header, username, password, qop, uri, \
body, stale_after + 1)
self.assertEqual(stale_response.status_code, 401)
header = stale_response.headers.get('WWW-Authenticate')
self.assertIn('stale=TRUE', header)
    def _test_digest_response_for_auth_request(self, header, username, password, qop, uri, body, nc=1, nonce=None):
        """Answer a WWW-Authenticate digest challenge and return (response, nonce).

        Parses *header*, builds an Authorization header via the module helper
        _make_digest_auth_header, and replays the GET with credentials.  A
        caller-supplied *nonce* overrides the one in the challenge (used to
        test nonce reuse).
        """
        auth_type, auth_info = header.split(None, 1)
        self.assertEqual(auth_type, 'Digest')
        d = parse_dict_header(auth_info)
        # Prefer an explicitly passed nonce (reuse scenario) over the fresh one.
        nonce = nonce or d['nonce']
        realm = d['realm']
        opaque = d['opaque']
        if qop :
            self.assertIn(qop, [x.strip() for x in d['qop'].split(',')], 'Challenge should contains expected qop')
        algorithm = d['algorithm']
        # cnonce/nc are only part of the digest when a qop is negotiated;
        # nc is rendered as the 8-digit zero-padded hex-style counter.
        cnonce, nc = (_hash(os.urandom(10), "MD5"), '{:08}'.format(nc)) if qop in ('auth', 'auth-int') else (None, None)
        auth_header = _make_digest_auth_header(
            username, password, 'GET', uri, nonce, realm, opaque, algorithm, qop, cnonce, nc, body)
        # make second request
        return self.app.get(
            uri,
            environ_base={
                # httpbin's digest auth implementation uses the remote addr to
                # build the nonce
                'REMOTE_ADDR': '127.0.0.1',
            },
            headers={
                'Authorization': auth_header,
            },
            data=body
        ), nonce
def test_digest_auth_wrong_pass(self):
"""Test different combinations of digest auth parameters"""
username = 'user'
password = 'passwd'
for qop in None, 'auth', 'auth-int',:
for algorithm in None, 'MD5', 'SHA-256', 'SHA-512':
for body in None, b'', b'request payload':
self._test_digest_auth_wrong_pass(username, password, qop, algorithm, body, 3)
def _test_digest_auth_wrong_pass(self, username, password, qop, algorithm=None, body=None, stale_after=None):
uri = self._digest_auth_create_uri(username, password, qop, algorithm, stale_after)
unauthorized_response = self._test_digest_auth_first_challenge(uri)
header = unauthorized_response.headers.get('WWW-Authenticate')
wrong_pass_response, nonce = self._test_digest_response_for_auth_request(header, username, "wrongPassword", qop, uri, body)
self.assertEqual(wrong_pass_response.status_code, 401)
header = wrong_pass_response.headers.get('WWW-Authenticate')
self.assertNotIn('stale=TRUE', header)
reused_nonce_response, nonce = self._test_digest_response_for_auth_request(header, username, password, qop, uri, \
body, nonce=nonce)
self.assertEqual(reused_nonce_response.status_code, 401)
header = reused_nonce_response.headers.get('WWW-Authenticate')
self.assertIn('stale=TRUE', header)
    def test_drip(self):
        # /drip with delay/duration still delivers the full byte count and a 200.
        response = self.app.get('/drip?numbytes=400&duration=2&delay=1')
        self.assertEqual(response.content_length, 400)
        self.assertEqual(len(self.get_data(response)), 400)
        self.assertEqual(response.status_code, 200)
    def test_drip_with_invalid_numbytes(self):
        # Zero or negative byte counts are rejected with 400 Bad Request.
        for bad_num in -1, 0:
            uri = '/drip?numbytes={0}&duration=2&delay=1'.format(bad_num)
            response = self.app.get(uri)
            self.assertEqual(response.status_code, 400)
    def test_drip_with_custom_code(self):
        # The `code` parameter overrides the status while the body still drips fully.
        response = self.app.get('/drip?numbytes=400&duration=2&code=500')
        self.assertEqual(response.content_length, 400)
        self.assertEqual(len(self.get_data(response)), 400)
        self.assertEqual(response.status_code, 500)
    def test_get_bytes(self):
        # /bytes/<n> returns exactly n bytes.
        response = self.app.get('/bytes/1024')
        self.assertEqual(len(self.get_data(response)), 1024)
        self.assertEqual(response.status_code, 200)
    def test_bytes_with_seed(self):
        response = self.app.get('/bytes/10?seed=0')
        # The RNG changed in python3, so even though we are
        # setting the seed, we can't expect the value to be the
        # same across both interpreters.
        if six.PY3:
            self.assertEqual(
                response.data, b'\xc5\xd7\x14\x84\xf8\xcf\x9b\xf4\xb7o'
            )
        else:
            self.assertEqual(
                response.data, b'\xd8\xc2kB\x82g\xc8Mz\x95'
            )
    def test_stream_bytes(self):
        # /stream-bytes/<n> also returns exactly n bytes.
        response = self.app.get('/stream-bytes/1024')
        self.assertEqual(len(self.get_data(response)), 1024)
        self.assertEqual(response.status_code, 200)
    def test_stream_bytes_with_seed(self):
        # Same seeded expectation as /bytes?seed above.
        response = self.app.get('/stream-bytes/10?seed=0')
        # The RNG changed in python3, so even though we are
        # setting the seed, we can't expect the value to be the
        # same across both interpreters.
        if six.PY3:
            self.assertEqual(
                response.data, b'\xc5\xd7\x14\x84\xf8\xcf\x9b\xf4\xb7o'
            )
        else:
            self.assertEqual(
                response.data, b'\xd8\xc2kB\x82g\xc8Mz\x95'
            )
    def test_delete_endpoint_returns_body(self):
        # DELETE with a form body is echoed back under the 'form' key.
        response = self.app.delete(
            '/delete',
            data={'name': 'kevin'},
            content_type='application/x-www-form-urlencoded'
        )
        form_data = json.loads(response.data.decode('utf-8'))['form']
        self.assertEqual(form_data, {'name': 'kevin'})
    def test_methods__to_status_endpoint(self):
        # /status/<code> honours the requested code for every HTTP method.
        methods = [
            'GET',
            'HEAD',
            'POST',
            'PUT',
            'DELETE',
            'PATCH',
            'TRACE',
        ]
        for m in methods:
            response = self.app.open(path='/status/418', method=m)
            self.assertEqual(response.status_code, 418)
    def test_status_endpoint_invalid_code(self):
        # A non-numeric status code is rejected.
        response = self.app.get(path='/status/4!9')
        self.assertEqual(response.status_code, 400)
    def test_status_endpoint_invalid_codes(self):
        # A comma-separated code list with a non-numeric entry is rejected as a whole.
        response = self.app.get(path='/status/200,402,foo')
        self.assertEqual(response.status_code, 400)
    def test_xml_endpoint(self):
        response = self.app.get(path='/xml')
        self.assertEqual(
            response.headers.get('Content-Type'), 'application/xml'
        )
    def test_x_forwarded_proto(self):
        # The echoed URL scheme follows the X-Forwarded-Proto header.
        response = self.app.get(path='/get', headers={
            'X-Forwarded-Proto':'https'
        })
        assert json.loads(response.data.decode('utf-8'))['url'].startswith('https://')
    def test_redirect_n_higher_than_1(self):
        # /redirect/<n> with n > 1 chains through /relative-redirect/<n-1>.
        response = self.app.get('/redirect/5')
        self.assertEqual(
            response.headers.get('Location'), '/relative-redirect/4'
        )
    def test_redirect_to_post(self):
        # A 307 preserves the request method and body across the redirect.
        response = self.app.post('/redirect-to?url=/post&status_code=307',
                                 data=b'\x01\x02\x03\x81\x82\x83',
                                 content_type='application/octet-stream')
        self.assertEqual(response.status_code, 307)
        self.assertEqual(
            response.headers.get('Location'), '/post'
        )
    def test_redirect_absolute_param_n_higher_than_1(self):
        # absolute=true switches the chain to fully-qualified URLs.
        response = self.app.get('/redirect/5?absolute=true')
        self.assertEqual(
            response.headers.get('Location'), 'http://localhost/absolute-redirect/4'
        )
    def test_redirect_n_equals_to_1(self):
        # The final hop of the chain lands on /get.
        response = self.app.get('/redirect/1')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            response.headers.get('Location'), '/get'
        )
    def test_relative_redirect_n_equals_to_1(self):
        response = self.app.get('/relative-redirect/1')
        self.assertEqual(
            response.headers.get('Location'), '/get'
        )
    def test_relative_redirect_n_higher_than_1(self):
        response = self.app.get('/relative-redirect/7')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            response.headers.get('Location'), '/relative-redirect/6'
        )
    def test_absolute_redirect_n_higher_than_1(self):
        response = self.app.get('/absolute-redirect/5')
        self.assertEqual(
            response.headers.get('Location'), 'http://localhost/absolute-redirect/4'
        )
    def test_absolute_redirect_n_equals_to_1(self):
        response = self.app.get('/absolute-redirect/1')
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            response.headers.get('Location'), 'http://localhost/get'
        )
    def test_request_range(self):
        # Without a Range header /range/<n> serves the whole payload with range metadata.
        response1 = self.app.get('/range/1234')
        self.assertEqual(response1.status_code, 200)
        self.assertEqual(response1.headers.get('ETag'), 'range1234')
        self.assertEqual(response1.headers.get('Content-range'), 'bytes 0-1233/1234')
        self.assertEqual(response1.headers.get('Accept-ranges'), 'bytes')
        self.assertEqual(len(self.get_data(response1)), 1234)
        # A repeat request is byte-for-byte identical (deterministic payload).
        response2 = self.app.get('/range/1234')
        self.assertEqual(response2.status_code, 200)
        self.assertEqual(response2.headers.get('ETag'), 'range1234')
        self.assertEqual(self.get_data(response1), self.get_data(response2))
    def test_request_range_with_parameters(self):
        # bytes=10-24 of the repeating alphabet is 'k'..'y' (15 bytes, 206 Partial).
        response = self.app.get(
            '/range/100?duration=1.5&chunk_size=5',
            headers={ 'Range': 'bytes=10-24' }
        )
        self.assertEqual(response.status_code, 206)
        self.assertEqual(response.headers.get('ETag'), 'range100')
        self.assertEqual(response.headers.get('Content-range'), 'bytes 10-24/100')
        self.assertEqual(response.headers.get('Accept-ranges'), 'bytes')
        self.assertEqual(response.headers.get('Content-Length'), '15')
        self.assertEqual(self.get_data(response), 'klmnopqrstuvwxy'.encode('utf8'))
    def test_request_range_first_15_bytes(self):
        response = self.app.get(
            '/range/1000',
            headers={ 'Range': 'bytes=0-15' }
        )
        self.assertEqual(response.status_code, 206)
        self.assertEqual(response.headers.get('ETag'), 'range1000')
        self.assertEqual(self.get_data(response), 'abcdefghijklmnop'.encode('utf8'))
        self.assertEqual(response.headers.get('Content-range'), 'bytes 0-15/1000')
    def test_request_range_open_ended_last_6_bytes(self):
        # 'bytes=20-' means from offset 20 through the end.
        response = self.app.get(
            '/range/26',
            headers={ 'Range': 'bytes=20-' }
        )
        self.assertEqual(response.status_code, 206)
        self.assertEqual(response.headers.get('ETag'), 'range26')
        self.assertEqual(self.get_data(response), 'uvwxyz'.encode('utf8'))
        self.assertEqual(response.headers.get('Content-range'), 'bytes 20-25/26')
        self.assertEqual(response.headers.get('Content-Length'), '6')
    def test_request_range_suffix(self):
        # 'bytes=-5' is a suffix range: the last five bytes.
        response = self.app.get(
            '/range/26',
            headers={ 'Range': 'bytes=-5' }
        )
        self.assertEqual(response.status_code, 206)
        self.assertEqual(response.headers.get('ETag'), 'range26')
        self.assertEqual(self.get_data(response), 'vwxyz'.encode('utf8'))
        self.assertEqual(response.headers.get('Content-range'), 'bytes 21-25/26')
        self.assertEqual(response.headers.get('Content-Length'), '5')
    def test_request_out_of_bounds(self):
        # Inverted range: 416 with an empty body and a 'bytes */26' content range.
        response = self.app.get(
            '/range/26',
            headers={ 'Range': 'bytes=10-5',
            }
        )
        self.assertEqual(response.status_code, 416)
        self.assertEqual(response.headers.get('ETag'), 'range26')
        self.assertEqual(len(self.get_data(response)), 0)
        self.assertEqual(response.headers.get('Content-range'), 'bytes */26')
        self.assertEqual(response.headers.get('Content-Length'), '0')
        # Start past the end of the resource.
        response = self.app.get(
            '/range/26',
            headers={ 'Range': 'bytes=32-40',
            }
        )
        self.assertEqual(response.status_code, 416)
        # End past the end of the resource.
        response = self.app.get(
            '/range/26',
            headers={ 'Range': 'bytes=0-40',
            }
        )
        self.assertEqual(response.status_code, 416)
    # --- /etag/<etag> conditional-request behaviour -------------------------
    def test_etag_if_none_match_matches(self):
        # Matching If-None-Match -> 304 Not Modified, ETag still sent.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-None-Match': 'abc' }
        )
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_none_match_matches_list(self):
        # A match anywhere in a quoted list also triggers 304.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-None-Match': '"123", "abc"' }
        )
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_none_match_matches_star(self):
        # '*' matches any representation.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-None-Match': '*' }
        )
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_none_match_w_prefix(self):
        # Weak validators (W/ prefix) are compared by value.
        response = self.app.get(
            '/etag/c3piozzzz',
            headers={ 'If-None-Match': 'W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz"' }
        )
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.headers.get('ETag'), 'c3piozzzz')
    def test_etag_if_none_match_has_no_match(self):
        # No match -> normal 200 with the ETag header.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-None-Match': '123' }
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_match_matches(self):
        # Matching If-Match -> request proceeds with 200.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-Match': 'abc' }
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_match_matches_list(self):
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-Match': '"123", "abc"' }
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_match_matches_star(self):
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-Match': '*' }
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers.get('ETag'), 'abc')
    def test_etag_if_match_has_no_match(self):
        # Failed If-Match -> 412 Precondition Failed, no ETag on the response.
        response = self.app.get(
            '/etag/abc',
            headers={ 'If-Match': '123' }
        )
        self.assertEqual(response.status_code, 412)
        self.assertNotIn('ETag', response.headers)
    def test_etag_with_no_headers(self):
        # Unconditional request: plain 200 with the ETag.
        response = self.app.get(
            '/etag/abc'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.headers.get('ETag'), 'abc')
def test_parse_multi_value_header(self):
self.assertEqual(parse_multi_value_header('xyzzy'), [ "xyzzy" ])
self.assertEqual(parse_multi_value_header('"xyzzy"'), [ "xyzzy" ])
self.assertEqual(parse_multi_value_header('W/"xyzzy"'), [ "xyzzy" ])
self.assertEqual(parse_multi_value_header('"xyzzy", "r2d2xxxx", "c3piozzzz"'), [ "xyzzy", "r2d2xxxx", "c3piozzzz" ])
self.assertEqual(parse_multi_value_header('W/"xyzzy", W/"r2d2xxxx", W/"c3piozzzz"'), [ "xyzzy", "r2d2xxxx", "c3piozzzz" ])
self.assertEqual(parse_multi_value_header('*'), [ "*" ])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| isc |
priomsrb/vimswitch | vimswitch/test/test_SwitchProfileAction.py | 1 | 6070 | from .FakeFileDownloader import createFakeFileDownloader
from .FileSystemTestCase import FileSystemTestCase
from mock import MagicMock, patch
from vimswitch.Application import Application
from vimswitch.Profile import Profile
from vimswitch.Settings import Settings
from vimswitch.SwitchProfileAction import createSwitchProfileAction
from vimswitch.six import StringIO
import os
class TestSwitchProfileAction(FileSystemTestCase):
    """Integration tests for SwitchProfileAction against a fake downloader."""
    def setUp(self):
        FileSystemTestCase.setUp(self)
        # Wire an Application against the per-test working dir and a canned
        # "internet" served from the fake_internet fixture directory.
        self.app = Application()
        self.app.settings = Settings(self.getWorkingDir())
        self.app.fileDownloader = createFakeFileDownloader(self.app, self.getDataPath('fake_internet'))
        self.switchProfileAction = createSwitchProfileAction(self.app)
        self.app.diskIo.createDirWithParents(self.app.settings.cachePath)
        self.app.diskIo.createDirWithParents(self.app.settings.downloadsPath)
        # Profile under test; resolves to github.com/test/vimrc in the fixtures.
        self.profile = Profile('test/vimrc')
    def test_switchToProfile_defaultProfileDoesNotExist_createsDefaultProfile(self):
        # First switch must leave a 'default' profile entry in the cache.
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        defaultProfile = self.app.settings.defaultProfile
        self.assertTrue(self.app.profileCache.contains(defaultProfile))
    def test_switchToProfile_profileNotInCache_downloadsProfile(self):
        # An uncached profile ends up in the cache after switching to it.
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        self.assertTrue(self.app.profileCache.contains(self.profile))
    def test_switchToProfile_profileInCache_doesNotDownloadProfile(self):
        # Any download attempt fails the test via the mock's side effect.
        self.app.fileDownloader.download = MagicMock(side_effect=AssertionError('Profile should not be downloaded'))
        self.app.profileCache.createEmptyProfile(self.profile)
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
    def test_switchToProfile_copiesProfileToHome(self):
        # Switching installs the profile's .vimrc and .vim into the home dir.
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        expectedVimrc = '" test vimrc data'
        actualVimrc = self.app.diskIo.getFileContents(self.getTestPath('.vimrc'))
        self.assertEqual(expectedVimrc, actualVimrc)
        vimDirPath = self.getTestPath('.vim')
        self.assertTrue(self.app.diskIo.dirExists(vimDirPath))
    def test_switchToProfile_copiesHomeToCache(self):
        # Pre-existing home files are snapshotted into the default profile's cache.
        vimrcPath = self.getTestPath('.vimrc')
        vimDirPath = self.getTestPath('.vim')
        self.app.diskIo.createFile(vimrcPath, '" default vimrc')
        self.app.diskIo.createDir(vimDirPath)
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        defaultProfile = self.app.settings.defaultProfile
        cachedVimrcPath = os.path.join(self.app.profileCache.getProfileLocation(defaultProfile), '.vimrc')
        expectedVimrc = '" default vimrc'
        actualVimrc = self.app.diskIo.getFileContents(cachedVimrcPath)
        self.assertEqual(expectedVimrc, actualVimrc)
        cachedVimDirPath = os.path.join(self.app.profileCache.getProfileLocation(defaultProfile), '.vim')
        self.assertTrue(self.app.diskIo.dirExists(cachedVimDirPath))
def test_switchToProfile_savesProfileChangesToCache(self):
self.switchProfileAction.profile = self.profile
self.switchProfileAction.execute()
# Now we make changes to the profile
vimrcPath = self.getTestPath('.vimrc')
vimDirPath = self.getTestPath('.vim')
self.app.diskIo.createFile(vimrcPath, '" updated vimrc') # Edit file
self.app.diskIo.deleteDir(vimDirPath) # Delete dir
defaultProfile = self.app.settings.defaultProfile
self.switchProfileAction.profile = defaultProfile
self.switchProfileAction.execute()
# Assert .vimrc updated
cachedVimrcPath = os.path.join(self.app.profileCache.getProfileLocation(self.profile), '.vimrc')
expectedVimrc = '" updated vimrc'
actualVimrc = self.app.diskIo.getFileContents(cachedVimrcPath)
self.assertEqual(expectedVimrc, actualVimrc)
# Assert .vim deleted
cachedVimDirPath = os.path.join(self.app.profileCache.getProfileLocation(defaultProfile), '.vim')
self.assertFalse(self.app.diskIo.dirExists(cachedVimDirPath))
    def test_switchToProfile_updateFlagSet_updatesCachedProfile(self):
        # Do an initial download of the profile
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        # Update the profile on the internet by using the version at fake_internet2
        self.app.fileDownloader.root = self.getDataPath('fake_internet2')
        # With update=True the cached copy must be refreshed from "the internet".
        self.switchProfileAction.update = True
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        self.assertFileContents('.vimrc', '" updated vimrc data')
        self.assertFileContents('.vimswitch/profiles/test.vimrc/.vimrc', '" updated vimrc data')
        vimDirPath = self.getTestPath('.vim')
        self.assertTrue(self.app.diskIo.dirExists(vimDirPath))
    def test_switchToProfile_setsCurrentProfile(self):
        # Switching records the new profile in the settings.
        self.assertNotEqual(self.app.settings.currentProfile, self.profile)
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        self.assertEqual(self.app.settings.currentProfile, self.profile)
    @patch('sys.stdout', new_callable=StringIO)
    def test_switchToProfile_prints(self, stdout):
        # Captures stdout and checks the user-facing progress messages.
        self.switchProfileAction.profile = self.profile
        self.switchProfileAction.execute()
        self.assertStdout(stdout, """
            Saving profile: default
            Downloading profile from https://github.com/test/vimrc/archive/master.zip
            Switched to profile: test/vimrc
            """)
# Helpers
def assertFileContents(self, path, expectedContents):
diskIo = self.app.diskIo
path = self.getTestPath(path)
actualContents = diskIo.getFileContents(path)
self.assertEqual(actualContents, expectedContents)
| gpl-2.0 |
VirtueSecurity/aws-extender | BappModules/boto/mws/response.py | 152 | 23191 | # Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, dis- tribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the fol- lowing conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- ITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
class ComplexType(dict):
    """An XML tag's attributes (held as dict entries) together with its scalar
    value, which is stored as an instance attribute named by ``_value``."""
    _value = 'Value'

    def __repr__(self):
        # Show the scalar (or None when unset) followed by the attribute dict.
        scalar = getattr(self, self._value, None)
        return '{0}{1}'.format(scalar, self.copy())

    def __str__(self):
        # An unset scalar stringifies to the empty string.
        return str(getattr(self, self._value, ''))
class DeclarativeType(object):
    """Base for the declarative parse hints (Element, SimpleList, MemberList...)
    attached as class attributes on ResponseElement subclasses.

    ``_hint`` is the ResponseElement subclass instantiated for the declared
    tag; when none is supplied, a one-off JIT subclass is synthesized so that
    per-declaration attributes can be attached without polluting shared classes.
    """
    def __init__(self, _hint=None, **kw):
        self._value = None
        if _hint is not None:
            self._hint = _hint
            return
        class JITResponse(ResponseElement):
            pass
        self._hint = JITResponse
        # Give the synthesized class a unique, debuggable name.
        self._hint.__name__ = 'JIT_{0}/{1}'.format(self.__class__.__name__,
                                                   hex(id(self._hint))[2:])
        for name, value in kw.items():
            setattr(self._hint, name, value)
    def __repr__(self):
        parent = getattr(self, '_parent', None)
        return '<{0}_{1}/{2}_{3}>'.format(self.__class__.__name__,
                                          parent and parent._name or '?',
                                          getattr(self, '_name', '?'),
                                          hex(id(self.__class__)))
    def setup(self, parent, name, *args, **kw):
        # Install a per-instance clone on the parent so parse state is not
        # shared between sibling ResponseElement instances.
        self._parent = parent
        self._name = name
        self._clone = self.__class__(_hint=self._hint)
        self._clone._parent = parent
        self._clone._name = name
        setattr(self._parent, self._name, self._clone)
    def start(self, *args, **kw):
        # Subclasses define how an opening tag is handled.
        raise NotImplementedError
    def end(self, *args, **kw):
        # Subclasses define how a closing tag is handled.
        raise NotImplementedError
    def teardown(self, *args, **kw):
        # Replace the declaration on the parent with the accumulated value.
        setattr(self._parent, self._name, self._value)
class Element(DeclarativeType):
    """Declares a single nested element parsed into one hint instance."""
    def start(self, *args, **kw):
        self._value = self._hint(parent=self._parent, **kw)
        return self._value
    def end(self, *args, **kw):
        pass
class SimpleList(DeclarativeType):
    """Declares repeated scalar tags collected into a plain list."""
    def __init__(self, *args, **kw):
        super(SimpleList, self).__init__(*args, **kw)
        self._value = []
    def start(self, *args, **kw):
        # Scalar items have no nested structure, so no handler is returned.
        return None
    def end(self, name, value, *args, **kw):
        self._value.append(value)
class ElementList(SimpleList):
    """Declares repeated structured tags, each parsed into a hint instance."""
    def start(self, *args, **kw):
        value = self._hint(parent=self._parent, **kw)
        self._value.append(value)
        return value
    def end(self, *args, **kw):
        pass
class MemberList(Element):
    """Declares the Amazon '<member>' list wrapper: an outer element whose
    'member' children form the actual list.  teardown() unwraps the container
    so callers see the list directly."""
    def __init__(self, _member=None, _hint=None, *args, **kw):
        message = 'Invalid `member` specification in {0}'.format(self.__class__.__name__)
        assert 'member' not in kw, message
        if _member is None:
            if _hint is None:
                # No member spec: members are generic structured elements.
                super(MemberList, self).__init__(*args, member=ElementList(**kw))
            else:
                super(MemberList, self).__init__(_hint=_hint)
        else:
            if _hint is None:
                # Member spec may be a declaration instance/class or an element hint.
                if issubclass(_member, DeclarativeType):
                    member = _member(**kw)
                else:
                    member = ElementList(_member, **kw)
                super(MemberList, self).__init__(*args, member=member)
            else:
                message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__,
                                                              _hint)
                raise AssertionError(message)
    def teardown(self, *args, **kw):
        if self._value is None:
            self._value = []
        else:
            # Unwrap: expose the member list itself, not the container element.
            if isinstance(self._value.member, DeclarativeType):
                self._value.member = []
            self._value = self._value.member
        super(MemberList, self).teardown(*args, **kw)
class ResponseFactory(object):
    """Builds the Response class for an MWS action, searching caller-supplied
    scopes for hand-written Response/Result classes before synthesizing one."""
    def __init__(self, scopes=None):
        self.scopes = [] if scopes is None else scopes
    def element_factory(self, name, parent):
        # Synthesize a subclass of `parent` named after the action element.
        class DynamicElement(parent):
            _name = name
        setattr(DynamicElement, '__name__', str(name))
        return DynamicElement
    def search_scopes(self, key):
        # Scopes may expose classes as attributes or as mapping entries.
        for scope in self.scopes:
            if hasattr(scope, key):
                return getattr(scope, key)
            if hasattr(scope, '__getitem__'):
                if key in scope:
                    return scope[key]
    def find_element(self, action, suffix, parent):
        element = self.search_scopes(action + suffix)
        if element is not None:
            return element
        # '...ByNextToken' actions reuse the base action's element definition.
        if action.endswith('ByNextToken'):
            element = self.search_scopes(action[:-len('ByNextToken')] + suffix)
            if element is not None:
                return self.element_factory(action + suffix, element)
        return self.element_factory(action + suffix, parent)
    def __call__(self, action, connection=None):
        response = self.find_element(action, 'Response', Response)
        # Ensure the response declares its '<action>Result' child element.
        if not hasattr(response, action + 'Result'):
            result = self.find_element(action, 'Result', ResponseElement)
            setattr(response, action + 'Result', Element(result))
        return response(connection=connection)
def strip_namespace(func):
    """Decorator for SAX handler methods: remove the instance's XML namespace
    prefix (``self._namespace`` plus ':') from *name* before delegating."""
    def wrapper(self, name, *args, **kw):
        ns = self._namespace
        if ns is not None and name.startswith(ns + ':'):
            name = name[len(ns) + 1:]
        return func(self, name, *args, **kw)
    return wrapper
class ResponseElement(dict):
    """SAX-driven parse node: XML attributes live in the dict, child elements
    become instance attributes, and DeclarativeType class attributes describe
    how nested tags are parsed."""
    _override = {}
    _name = None
    _namespace = None
    def __init__(self, connection=None, name=None, parent=None, attrs=None):
        # Child nodes inherit the namespace of their parent by default.
        if parent is not None and self._namespace is None:
            self._namespace = parent._namespace
        if connection is not None:
            self._connection = connection
        self._name = name or self._name or self.__class__.__name__
        self._declared('setup', attrs=attrs)
        dict.__init__(self, attrs and attrs.copy() or {})
    def _declared(self, op, **kw):
        # Apply `op` (setup/teardown) to every DeclarativeType declared on
        # this class, its bases, or this instance.
        def inherit(obj):
            result = {}
            for cls in getattr(obj, '__bases__', ()):
                result.update(inherit(cls))
            result.update(obj.__dict__)
            return result
        scope = inherit(self.__class__)
        scope.update(self.__dict__)
        declared = lambda attr: isinstance(attr[1], DeclarativeType)
        for name, node in filter(declared, scope.items()):
            getattr(node, op)(self, name, parentname=self._name, **kw)
    @property
    def connection(self):
        return self._connection
    def __repr__(self):
        render = lambda pair: '{0!s}: {1!r}'.format(*pair)
        do_show = lambda pair: not pair[0].startswith('_')
        attrs = filter(do_show, self.__dict__.items())
        name = self.__class__.__name__
        # Synthesized JIT classes are shown by their element name instead.
        if name.startswith('JIT_'):
            name = '^{0}^'.format(self._name or '')
        return '{0}{1!r}({2})'.format(
            name, self.copy(), ', '.join(map(render, attrs)))
    def _type_for(self, name, attrs):
        return self._override.get(name, globals().get(name, ResponseElement))
    @strip_namespace
    def startElement(self, name, attrs, connection):
        attribute = getattr(self, name, None)
        if isinstance(attribute, DeclarativeType):
            # Declared child: delegate to its parse-hint handler.
            return attribute.start(name=name, attrs=attrs,
                                   connection=connection)
        elif attrs.getLength():
            # Undeclared tag with attributes: keep attrs + text together.
            setattr(self, name, ComplexType(attrs.copy()))
        else:
            return None
    @strip_namespace
    def endElement(self, name, value, connection):
        attribute = getattr(self, name, None)
        if name == self._name:
            # Our own closing tag: finalize all declared children.
            self._declared('teardown')
        elif isinstance(attribute, DeclarativeType):
            attribute.end(name=name, value=value, connection=connection)
        elif isinstance(attribute, ComplexType):
            setattr(attribute, attribute._value, value)
        else:
            setattr(self, name, value)
class Response(ResponseElement):
    """Root element of an MWS API response; always carries ResponseMetadata."""
    ResponseMetadata = Element()
    @strip_namespace
    def startElement(self, name, attrs, connection):
        if name == self._name:
            # Root tag: absorb its attributes into the dict.
            self.update(attrs)
        else:
            return super(Response, self).startElement(name, attrs, connection)
    @property
    def _result(self):
        # The '<Action>Result' child parsed for this response, if any.
        return getattr(self, self._action + 'Result', None)
    @property
    def _action(self):
        # Action name derived from the element name minus the 'Response' suffix.
        return (self._name or self.__class__.__name__)[:-len('Response')]
class ResponseResultList(Response):
    """Response whose '<Action>Result' repeats, collected into a list."""
    _ResultClass = ResponseElement
    def __init__(self, *args, **kw):
        setattr(self, self._action + 'Result', ElementList(self._ResultClass))
        super(ResponseResultList, self).__init__(*args, **kw)
# --- Declarative schemas: Feeds, Reports and Fulfillment Inbound APIs -------
class FeedSubmissionInfo(ResponseElement):
    pass
class SubmitFeedResult(ResponseElement):
    FeedSubmissionInfo = Element(FeedSubmissionInfo)
class GetFeedSubmissionListResult(ResponseElement):
    FeedSubmissionInfo = ElementList(FeedSubmissionInfo)
class GetFeedSubmissionCountResult(ResponseElement):
    pass
class CancelFeedSubmissionsResult(GetFeedSubmissionListResult):
    pass
class GetServiceStatusResult(ResponseElement):
    Messages = Element(Messages=ElementList())
class ReportRequestInfo(ResponseElement):
    pass
class RequestReportResult(ResponseElement):
    ReportRequestInfo = Element()
class GetReportRequestListResult(RequestReportResult):
    ReportRequestInfo = ElementList()
class CancelReportRequestsResult(RequestReportResult):
    pass
class GetReportListResult(ResponseElement):
    ReportInfo = ElementList()
class ManageReportScheduleResult(ResponseElement):
    ReportSchedule = Element()
class GetReportScheduleListResult(ManageReportScheduleResult):
    pass
class UpdateReportAcknowledgementsResult(GetReportListResult):
    pass
class CreateInboundShipmentPlanResult(ResponseElement):
    InboundShipmentPlans = MemberList(ShipToAddress=Element(),
                                      Items=MemberList())
class ListInboundShipmentsResult(ResponseElement):
    ShipmentData = MemberList(ShipFromAddress=Element())
class ListInboundShipmentItemsResult(ResponseElement):
    ItemData = MemberList()
class ListInventorySupplyResult(ResponseElement):
    InventorySupplyList = MemberList(
        EarliestAvailability=Element(),
        SupplyDetail=MemberList(
            EarliestAvailableToPick=Element(),
            LatestAvailableToPick=Element(),
        )
    )
class ComplexAmount(ResponseElement):
    """A currency amount: a CurrencyCode child plus a Decimal scalar child
    whose tag name is given by ``_amount``."""
    _amount = 'Value'
    def __repr__(self):
        return '{0} {1}'.format(self.CurrencyCode, getattr(self, self._amount))
    def __float__(self):
        return float(getattr(self, self._amount))
    def __str__(self):
        return str(getattr(self, self._amount))
    @strip_namespace
    def startElement(self, name, attrs, connection):
        # Only the two known children are legal inside an amount.
        if name not in ('CurrencyCode', self._amount):
            message = 'Unrecognized tag {0} in ComplexAmount'.format(name)
            raise AssertionError(message)
        return super(ComplexAmount, self).startElement(name, attrs, connection)
    @strip_namespace
    def endElement(self, name, value, connection):
        # Parse the amount into an exact Decimal, never a float.
        if name == self._amount:
            value = Decimal(value)
        super(ComplexAmount, self).endElement(name, value, connection)
class ComplexMoney(ComplexAmount):
    # Same shape, but the scalar child is tagged 'Amount'.
    _amount = 'Amount'
class ComplexWeight(ResponseElement):
    """A weight: Unit child plus a Decimal Value child."""
    def __repr__(self):
        return '{0} {1}'.format(self.Value, self.Unit)
    def __float__(self):
        return float(self.Value)
    def __str__(self):
        return str(self.Value)
    @strip_namespace
    def startElement(self, name, attrs, connection):
        if name not in ('Unit', 'Value'):
            message = 'Unrecognized tag {0} in ComplexWeight'.format(name)
            raise AssertionError(message)
        return super(ComplexWeight, self).startElement(name, attrs, connection)
    @strip_namespace
    def endElement(self, name, value, connection):
        if name == 'Value':
            value = Decimal(value)
        super(ComplexWeight, self).endElement(name, value, connection)
class Dimension(ComplexType):
    # One dimension: attributes (e.g. Units) plus a scalar stored as .Value.
    _value = 'Value'
class ComplexDimensions(ResponseElement):
    """Height/Length/Width/Weight dimensions, each a Dimension with Decimal value."""
    _dimensions = ('Height', 'Length', 'Width', 'Weight')
    def __repr__(self):
        values = [getattr(self, key, None) for key in self._dimensions]
        values = filter(None, values)
        return 'x'.join(map('{0.Value:0.2f}{0[Units]}'.format, values))
    @strip_namespace
    def startElement(self, name, attrs, connection):
        if name not in self._dimensions:
            message = 'Unrecognized tag {0} in ComplexDimensions'.format(name)
            raise AssertionError(message)
        setattr(self, name, Dimension(attrs.copy()))
    @strip_namespace
    def endElement(self, name, value, connection):
        # Empty text parses as zero rather than raising.
        if name in self._dimensions:
            value = Decimal(value or '0')
        ResponseElement.endElement(self, name, value, connection)
class FulfillmentPreviewItem(ResponseElement):
EstimatedShippingWeight = Element(ComplexWeight)
class FulfillmentPreview(ResponseElement):
EstimatedShippingWeight = Element(ComplexWeight)
EstimatedFees = MemberList(Amount=Element(ComplexAmount))
UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem)
FulfillmentPreviewShipments = MemberList(
FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem),
)
class GetFulfillmentPreviewResult(ResponseElement):
FulfillmentPreviews = MemberList(FulfillmentPreview)
class FulfillmentOrder(ResponseElement):
DestinationAddress = Element()
NotificationEmailList = MemberList(SimpleList)
class GetFulfillmentOrderResult(ResponseElement):
FulfillmentOrder = Element(FulfillmentOrder)
FulfillmentShipment = MemberList(
FulfillmentShipmentItem=MemberList(),
FulfillmentShipmentPackage=MemberList(),
)
FulfillmentOrderItem = MemberList()
class ListAllFulfillmentOrdersResult(ResponseElement):
FulfillmentOrders = MemberList(FulfillmentOrder)
class GetPackageTrackingDetailsResult(ResponseElement):
ShipToAddress = Element()
TrackingEvents = MemberList(EventAddress=Element())
class Image(ResponseElement):
pass
class AttributeSet(ResponseElement):
ItemDimensions = Element(ComplexDimensions)
ListPrice = Element(ComplexMoney)
PackageDimensions = Element(ComplexDimensions)
SmallImage = Element(Image)
class ItemAttributes(AttributeSet):
Languages = Element(Language=ElementList())
def __init__(self, *args, **kw):
names = ('Actor', 'Artist', 'Author', 'Creator', 'Director',
'Feature', 'Format', 'GemType', 'MaterialType',
'MediaType', 'OperatingSystem', 'Platform')
for name in names:
setattr(self, name, SimpleList())
super(ItemAttributes, self).__init__(*args, **kw)
class VariationRelationship(ResponseElement):
    """Parent/child variation link in the Products API."""
    Identifiers = Element(MarketplaceASIN=Element(),
                          SKUIdentifier=Element())
    GemType = SimpleList()
    MaterialType = SimpleList()
    OperatingSystem = SimpleList()
class Price(ResponseElement):
    """Price triple: landed price, listing price and shipping."""
    LandedPrice = Element(ComplexMoney)
    ListingPrice = Element(ComplexMoney)
    Shipping = Element(ComplexMoney)
class CompetitivePrice(ResponseElement):
    """A single competing offer's price."""
    Price = Element(Price)
class CompetitivePriceList(ResponseElement):
    """Repeatable list of competing prices."""
    CompetitivePrice = ElementList(CompetitivePrice)
class CompetitivePricing(ResponseElement):
    """Competitive pricing summary for a product."""
    CompetitivePrices = Element(CompetitivePriceList)
    NumberOfOfferListings = SimpleList()
    TradeInValue = Element(ComplexMoney)
class SalesRank(ResponseElement):
    """Sales rank entry; all fields are scalars handled by the base class."""
    pass
class LowestOfferListing(ResponseElement):
    """One of the lowest-priced offers, with its shipping-time qualifier."""
    Qualifiers = Element(ShippingTime=Element())
    Price = Element(Price)
class Offer(ResponseElement):
    """The seller's own offer and its prices."""
    BuyingPrice = Element(Price)
    RegularPrice = Element(ComplexMoney)
class Product(ResponseElement):
    """Top-level product record aggregating identifiers, attributes,
    relationships, pricing, rankings and offers."""
    # Product elements live in the secondary XML namespace of the response.
    _namespace = 'ns2'
    Identifiers = Element(MarketplaceASIN=Element(),
                          SKUIdentifier=Element())
    AttributeSets = Element(
        ItemAttributes=ElementList(ItemAttributes),
    )
    Relationships = Element(
        VariationParent=ElementList(VariationRelationship),
    )
    CompetitivePricing = ElementList(CompetitivePricing)
    SalesRankings = Element(
        SalesRank=ElementList(SalesRank),
    )
    LowestOfferListings = Element(
        LowestOfferListing=ElementList(LowestOfferListing),
    )
    Offers = Element(
        Offer=ElementList(Offer),
    )
# Products API result/response wrappers.  The per-operation responses only
# differ in name; they all parse through ProductsBulkOperationResult.
class ListMatchingProductsResult(ResponseElement):
    Products = Element(Product=ElementList(Product))
class ProductsBulkOperationResult(ResponseElement):
    """One result of a bulk product lookup: the product or an error."""
    Product = Element(Product)
    Error = Element()
class ProductsBulkOperationResponse(ResponseResultList):
    _ResultClass = ProductsBulkOperationResult
class GetMatchingProductResponse(ProductsBulkOperationResponse):
    pass
class GetMatchingProductForIdResult(ListMatchingProductsResult):
    pass
class GetMatchingProductForIdResponse(ResponseResultList):
    _ResultClass = GetMatchingProductForIdResult
class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetCompetitivePricingForASINResponse(ProductsBulkOperationResponse):
    pass
class GetLowestOfferListingsForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetLowestOfferListingsForASINResponse(ProductsBulkOperationResponse):
    pass
class GetMyPriceForSKUResponse(ProductsBulkOperationResponse):
    pass
class GetMyPriceForASINResponse(ProductsBulkOperationResponse):
    pass
class ProductCategory(ResponseElement):
    """A product category node; ``Parent`` links to the enclosing category."""
    def __init__(self, *args, **kw):
        # The self-referential Parent element has to be created per instance:
        # the name ProductCategory is not bound yet while the class body
        # executes, so it cannot be declared as a class attribute like the
        # other schema fields.
        self.Parent = Element(ProductCategory)
        super(ProductCategory, self).__init__(*args, **kw)
class GetProductCategoriesResult(ResponseElement):
    """Result wrapper: category paths the product belongs to."""
    Self = ElementList(ProductCategory)
class GetProductCategoriesForSKUResult(GetProductCategoriesResult):
    pass
class GetProductCategoriesForASINResult(GetProductCategoriesResult):
    pass
class Order(ResponseElement):
    """An order with its total, shipping address and payment breakdown."""
    OrderTotal = Element(ComplexMoney)
    ShippingAddress = Element()
    PaymentExecutionDetail = Element(
        PaymentExecutionDetailItem=ElementList(
            PaymentExecutionDetailItem=Element(
                Payment=Element(ComplexMoney)
            )
        )
    )
class ListOrdersResult(ResponseElement):
    """Result wrapper: list of orders."""
    Orders = Element(Order=ElementList(Order))
class GetOrderResult(ListOrdersResult):
    pass
class OrderItem(ResponseElement):
    """A line item of an order; every money field parses as ComplexMoney."""
    ItemPrice = Element(ComplexMoney)
    ShippingPrice = Element(ComplexMoney)
    GiftWrapPrice = Element(ComplexMoney)
    ItemTax = Element(ComplexMoney)
    ShippingTax = Element(ComplexMoney)
    GiftWrapTax = Element(ComplexMoney)
    ShippingDiscount = Element(ComplexMoney)
    PromotionDiscount = Element(ComplexMoney)
    PromotionIds = SimpleList()
    CODFee = Element(ComplexMoney)
    CODFeeDiscount = Element(ComplexMoney)
class ListOrderItemsResult(ResponseElement):
    OrderItems = Element(OrderItem=ElementList(OrderItem))
class ListMarketplaceParticipationsResult(ResponseElement):
    """Marketplaces the seller participates in."""
    ListParticipations = Element(Participation=ElementList())
    ListMarketplaces = Element(Marketplace=ElementList())
class ListRecommendationsResult(ResponseElement):
    ListingQualityRecommendations = MemberList(ItemIdentifier=Element())
class Customer(ResponseElement):
    """Customer record: contact info, addresses and marketplace domains."""
    PrimaryContactInfo = Element()
    ShippingAddressList = Element(ShippingAddress=ElementList())
    AssociatedMarketplaces = Element(MarketplaceDomain=ElementList())
class ListCustomersResult(ResponseElement):
    CustomerList = Element(Customer=ElementList(Customer))
class GetCustomersForCustomerIdResult(ListCustomersResult):
    pass
# Cart retrieval responses.
class CartItem(ResponseElement):
    CurrentPrice = Element(ComplexMoney)
    SalePrice = Element(ComplexMoney)
class Cart(ResponseElement):
    ActiveCartItemList = Element(CartItem=ElementList(CartItem))
    SavedCartItemList = Element(CartItem=ElementList(CartItem))
class ListCartsResult(ResponseElement):
    CartList = Element(Cart=ElementList(Cart))
class GetCartsResult(ListCartsResult):
    pass
# Subscription / notification destination responses.
class Destination(ResponseElement):
    AttributeList = MemberList()
class ListRegisteredDestinationsResult(ResponseElement):
    DestinationList = MemberList(Destination)
class Subscription(ResponseElement):
    Destination = Element(Destination)
class GetSubscriptionResult(ResponseElement):
    Subscription = Element(Subscription)
class ListSubscriptionsResult(ResponseElement):
    SubscriptionList = MemberList(Subscription)
# Payment (order reference / authorize / capture / refund) responses.
class OrderReferenceDetails(ResponseElement):
    Buyer = Element()
    OrderTotal = Element(ComplexMoney)
    Destination = Element(PhysicalDestination=Element())
    SellerOrderAttributes = Element()
    OrderReferenceStatus = Element()
    Constraints = ElementList()
class SetOrderReferenceDetailsResult(ResponseElement):
    OrderReferenceDetails = Element(OrderReferenceDetails)
class GetOrderReferenceDetailsResult(SetOrderReferenceDetailsResult):
    pass
class AuthorizationDetails(ResponseElement):
    AuthorizationAmount = Element(ComplexMoney)
    CapturedAmount = Element(ComplexMoney)
    AuthorizationFee = Element(ComplexMoney)
    AuthorizationStatus = Element()
class AuthorizeResult(ResponseElement):
    AuthorizationDetails = Element(AuthorizationDetails)
class GetAuthorizationDetailsResult(AuthorizeResult):
    pass
class CaptureDetails(ResponseElement):
    CaptureAmount = Element(ComplexMoney)
    RefundedAmount = Element(ComplexMoney)
    CaptureFee = Element(ComplexMoney)
    CaptureStatus = Element()
class CaptureResult(ResponseElement):
    CaptureDetails = Element(CaptureDetails)
class GetCaptureDetailsResult(CaptureResult):
    pass
class RefundDetails(ResponseElement):
    RefundAmount = Element(ComplexMoney)
    FeeRefunded = Element(ComplexMoney)
    RefundStatus = Element()
class RefundResult(ResponseElement):
    RefundDetails = Element(RefundDetails)
class GetRefundDetails(RefundResult):
    pass
| mit |
Kriechi/pathod | libpathod/language/writer.py | 5 | 2083 | import time
from netlib.exceptions import TcpDisconnect
import netlib.tcp
# Default number of bytes written per fp.write() call.
BLOCKSIZE = 1024
# It's not clear what the upper limit for time.sleep is. It's lower than the
# maximum int or float. 1 year should do.
FOREVER = 60 * 60 * 24 * 365
def send_chunk(fp, val, blocksize, start, end):
    """
    Write val[start:end] to fp in pieces of at most blocksize each, and
    return the number of units written.

    (start, end): Inclusive lower bound, exclusive upper bound.
    """
    offset = start
    while offset < end:
        upper = min(offset + blocksize, end)
        fp.write(val[offset:upper])
        offset = upper
    return end - start
def write_values(fp, vals, actions, sofar=0, blocksize=BLOCKSIZE):
    """
    Write the queued values to fp, honouring interleaved actions.

    vals: A list of values, which may be strings or Value objects.

    actions: A list of (offset, action, arg) tuples. Action may be "pause"
    or "disconnect".  Offsets are absolute positions in the overall stream.

    sofar: Number of units already written by earlier calls; action offsets
    are interpreted relative to the start of the whole stream, so offsets
    below ``sofar`` trigger immediately.

    Both vals and actions are in reverse order, with the first items last.

    Return True if connection should disconnect.
    """
    # BUG FIX: the original body reset ``sofar = 0`` on entry, silently
    # discarding the caller-supplied offset and making the parameter dead.
    try:
        while vals:
            v = vals.pop()
            offset = 0
            # Fire every action whose offset falls inside this value,
            # writing the data up to the action point first.
            while actions and actions[-1][0] < (sofar + len(v)):
                a = actions.pop()
                offset += send_chunk(
                    fp,
                    v,
                    blocksize,
                    offset,
                    a[0] - sofar - offset
                )
                if a[1] == "pause":
                    # "f" means pause forever (bounded by FOREVER above).
                    time.sleep(
                        FOREVER if a[2] == "f" else a[2]
                    )
                elif a[1] == "disconnect":
                    return True
                elif a[1] == "inject":
                    send_chunk(fp, a[2], blocksize, 0, len(a[2]))
            # Write whatever is left of this value after the last action.
            send_chunk(fp, v, blocksize, offset, len(v))
            sofar += len(v)
        # Remainders: actions scheduled at or beyond the end of the data.
        while actions:
            a = actions.pop()
            if a[1] == "pause":
                time.sleep(a[2])
            elif a[1] == "disconnect":
                return True
            elif a[1] == "inject":
                send_chunk(fp, a[2], blocksize, 0, len(a[2]))
    except TcpDisconnect:  # pragma: no cover
        return True
| mit |
JVenberg/PokemonGo-Bot-Desktop | pywin/Lib/encodings/raw_unicode_escape.py | 852 | 1208 | """ Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec delegating straight to the C implementation."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.raw_unicode_escape_encode
    decode = codecs.raw_unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder backed by the C raw-unicode-escape codec."""
    def encode(self, input, final=False):
        # The C helper returns (encoded_bytes, length_consumed); the
        # incremental API only wants the encoded output.
        encoded, _consumed = codecs.raw_unicode_escape_encode(
            input, self.errors)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder backed by the C raw-unicode-escape codec."""
    def decode(self, input, final=False):
        # The C helper returns (decoded_text, length_consumed); the
        # incremental API only wants the decoded output.
        decoded, _consumed = codecs.raw_unicode_escape_decode(
            input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; encode() is inherited from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; decode() is inherited from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codec registry looks up by name."""
    return codecs.CodecInfo(
        name='raw-unicode-escape',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| mit |
klmitch/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_int.py | 7 | 10211 | # Copyright (C) 2014,2015 VA Linux Systems Japan K.K.
# Copyright (C) 2014,2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
* references
** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic
"""
import netaddr
from oslo_log import log as logging
from ryu.lib.packet import ether_types
from ryu.lib.packet import icmpv6
from ryu.lib.packet import in_proto
from neutron._i18n import _LE
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native \
import ovs_bridge
LOG = logging.getLogger(__name__)
class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge):
    """openvswitch agent br-int specific logic."""
    def setup_default_table(self):
        # Defaults: NORMAL switching, a canary flow for liveness checks,
        # and drop anything unmatched in the ARP anti-spoofing table.
        self.install_normal()
        self.setup_canary_table()
        self.install_drop(table_id=constants.ARP_SPOOF_TABLE)
    def setup_canary_table(self):
        self.install_drop(constants.CANARY_TABLE)
    def check_canary_table(self):
        # The canary flow's presence distinguishes a healthy switch from a
        # restarted one (flows wiped) or a dead/unreachable one.
        try:
            flows = self.dump_flows(constants.CANARY_TABLE)
        except RuntimeError:
            LOG.exception(_LE("Failed to communicate with the switch"))
            return constants.OVS_DEAD
        return constants.OVS_NORMAL if flows else constants.OVS_RESTARTED
    @staticmethod
    def _local_vlan_match(_ofp, ofpp, port, vlan_vid):
        # Match traffic arriving on ``port`` tagged with ``vlan_vid``.
        return ofpp.OFPMatch(in_port=port, vlan_vid=vlan_vid)
    def provision_local_vlan(self, port, lvid, segmentation_id):
        """Rewrite the segmentation vlan of ``port`` to local vlan ``lvid``."""
        (_dp, ofp, ofpp) = self._get_dp()
        if segmentation_id is None:
            # Untagged traffic: push a vlan header before setting the vid.
            vlan_vid = ofp.OFPVID_NONE
            actions = [ofpp.OFPActionPushVlan()]
        else:
            vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
            actions = []
        match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
        actions += [
            ofpp.OFPActionSetField(vlan_vid=lvid | ofp.OFPVID_PRESENT),
            ofpp.OFPActionOutput(ofp.OFPP_NORMAL, 0),
        ]
        self.install_apply_actions(priority=3,
                                   match=match,
                                   actions=actions)
    def reclaim_local_vlan(self, port, segmentation_id):
        """Remove the flow installed by provision_local_vlan."""
        (_dp, ofp, ofpp) = self._get_dp()
        if segmentation_id is None:
            vlan_vid = ofp.OFPVID_NONE
        else:
            vlan_vid = segmentation_id | ofp.OFPVID_PRESENT
        match = self._local_vlan_match(ofp, ofpp, port, vlan_vid)
        self.delete_flows(match=match)
    @staticmethod
    def _dvr_to_src_mac_match(ofp, ofpp, vlan_tag, dst_mac):
        return ofpp.OFPMatch(vlan_vid=vlan_tag | ofp.OFPVID_PRESENT,
                             eth_dst=dst_mac)
    @staticmethod
    def _dvr_to_src_mac_table_id(network_type):
        # VLAN networks use a dedicated DVR table; everything else (tunnels)
        # shares the default one.
        if network_type == p_const.TYPE_VLAN:
            return constants.DVR_TO_SRC_MAC_VLAN
        else:
            return constants.DVR_TO_SRC_MAC
    def install_dvr_to_src_mac(self, network_type,
                               vlan_tag, gateway_mac, dst_mac, dst_port):
        """Deliver DVR-routed traffic to ``dst_port`` with the gateway MAC
        as source, stripping the local vlan on the way out."""
        table_id = self._dvr_to_src_mac_table_id(network_type)
        (_dp, ofp, ofpp) = self._get_dp()
        match = self._dvr_to_src_mac_match(ofp, ofpp,
                                           vlan_tag=vlan_tag, dst_mac=dst_mac)
        actions = [
            ofpp.OFPActionPopVlan(),
            ofpp.OFPActionSetField(eth_src=gateway_mac),
            ofpp.OFPActionOutput(dst_port, 0),
        ]
        self.install_apply_actions(table_id=table_id,
                                   priority=4,
                                   match=match,
                                   actions=actions)
    def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac):
        """Remove the flow installed by install_dvr_to_src_mac."""
        table_id = self._dvr_to_src_mac_table_id(network_type)
        (_dp, ofp, ofpp) = self._get_dp()
        match = self._dvr_to_src_mac_match(ofp, ofpp,
                                           vlan_tag=vlan_tag, dst_mac=dst_mac)
        self.delete_flows(table_id=table_id, match=match)
    def add_dvr_mac_vlan(self, mac, port):
        """Steer frames from a DVR MAC on a VLAN network to its DVR table."""
        self.install_goto(table_id=constants.LOCAL_SWITCHING,
                          priority=4,
                          in_port=port,
                          eth_src=mac,
                          dest_table_id=constants.DVR_TO_SRC_MAC_VLAN)
    def remove_dvr_mac_vlan(self, mac):
        # REVISIT(yamamoto): match in_port as well?
        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
                          eth_src=mac)
    def add_dvr_mac_tun(self, mac, port):
        """Steer frames from a DVR MAC on a tunnel network to its DVR table."""
        self.install_goto(table_id=constants.LOCAL_SWITCHING,
                          priority=2,
                          in_port=port,
                          eth_src=mac,
                          dest_table_id=constants.DVR_TO_SRC_MAC)
    def remove_dvr_mac_tun(self, mac, port):
        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
                          in_port=port, eth_src=mac)
    @staticmethod
    def _arp_reply_match(ofp, ofpp, port):
        return ofpp.OFPMatch(in_port=port,
                             eth_type=ether_types.ETH_TYPE_ARP)
    @staticmethod
    def _icmpv6_reply_match(ofp, ofpp, port):
        return ofpp.OFPMatch(in_port=port,
                             eth_type=ether_types.ETH_TYPE_IPV6,
                             ip_proto=in_proto.IPPROTO_ICMPV6,
                             icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT)
    def install_icmpv6_na_spoofing_protection(self, port, ip_addresses):
        # Allow neighbor advertisements as long as they match addresses
        # that actually belong to the port.
        for ip in ip_addresses:
            masked_ip = self._cidr_to_ryu(ip)
            self.install_normal(
                table_id=constants.ARP_SPOOF_TABLE, priority=2,
                eth_type=ether_types.ETH_TYPE_IPV6,
                ip_proto=in_proto.IPPROTO_ICMPV6,
                icmpv6_type=icmpv6.ND_NEIGHBOR_ADVERT,
                ipv6_nd_target=masked_ip, in_port=port)
        # Now that the rules are ready, direct icmpv6 neighbor advertisement
        # traffic from the port into the anti-spoof table.
        (_dp, ofp, ofpp) = self._get_dp()
        match = self._icmpv6_reply_match(ofp, ofpp, port=port)
        self.install_goto(table_id=constants.LOCAL_SWITCHING,
                          priority=10,
                          match=match,
                          dest_table_id=constants.ARP_SPOOF_TABLE)
    def set_allowed_macs_for_port(self, port, mac_addresses=None,
                                  allow_all=False):
        """Restrict ``port`` to the given source MACs (or lift the limit)."""
        if allow_all:
            self.delete_flows(table_id=constants.LOCAL_SWITCHING, in_port=port)
            self.delete_flows(table_id=constants.MAC_SPOOF_TABLE, in_port=port)
            return
        mac_addresses = mac_addresses or []
        for address in mac_addresses:
            self.install_normal(
                table_id=constants.MAC_SPOOF_TABLE, priority=2,
                eth_src=address, in_port=port)
        # normalize so we can see if macs are the same
        mac_addresses = {netaddr.EUI(mac) for mac in mac_addresses}
        # Drop stale per-MAC allow rules no longer in the requested set.
        flows = self.dump_flows(constants.MAC_SPOOF_TABLE)
        for flow in flows:
            matches = dict(flow.match.items())
            if matches.get('in_port') != port:
                continue
            if not matches.get('eth_src'):
                continue
            flow_mac = matches['eth_src']
            if netaddr.EUI(flow_mac) not in mac_addresses:
                self.delete_flows(table_id=constants.MAC_SPOOF_TABLE,
                                  in_port=port, eth_src=flow_mac)
        self.install_goto(table_id=constants.LOCAL_SWITCHING,
                          priority=9, in_port=port,
                          dest_table_id=constants.MAC_SPOOF_TABLE)
    def install_arp_spoofing_protection(self, port, ip_addresses):
        # allow ARP replies as long as they match addresses that actually
        # belong to the port.
        for ip in ip_addresses:
            masked_ip = self._cidr_to_ryu(ip)
            self.install_goto(table_id=constants.ARP_SPOOF_TABLE,
                              priority=2,
                              eth_type=ether_types.ETH_TYPE_ARP,
                              arp_spa=masked_ip,
                              in_port=port,
                              dest_table_id=constants.MAC_SPOOF_TABLE)
        # Now that the rules are ready, direct ARP traffic from the port into
        # the anti-spoof table.
        # This strategy fails gracefully because OVS versions that can't match
        # on ARP headers will just process traffic normally.
        (_dp, ofp, ofpp) = self._get_dp()
        match = self._arp_reply_match(ofp, ofpp, port=port)
        self.install_goto(table_id=constants.LOCAL_SWITCHING,
                          priority=10,
                          match=match,
                          dest_table_id=constants.ARP_SPOOF_TABLE)
    def delete_arp_spoofing_protection(self, port):
        """Remove both the ARP and ICMPv6-NA redirects plus allow rules."""
        (_dp, ofp, ofpp) = self._get_dp()
        match = self._arp_reply_match(ofp, ofpp, port=port)
        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
                          match=match)
        match = self._icmpv6_reply_match(ofp, ofpp, port=port)
        self.delete_flows(table_id=constants.LOCAL_SWITCHING,
                          match=match)
        self.delete_arp_spoofing_allow_rules(port)
    def delete_arp_spoofing_allow_rules(self, port):
        self.delete_flows(table_id=constants.ARP_SPOOF_TABLE,
                          in_port=port)
| apache-2.0 |
nubark/odoo | addons/project_timesheet/report/project_report.py | 41 | 1178 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields,osv
from openerp import tools
class report_project_task_user(osv.Model):
    """Extends the project task analysis report with timesheet hour metrics.

    The extra columns are fed by appending the hour expressions to the
    parent report's SELECT and GROUP BY clauses below.
    """
    _inherit = "report.project.task.user"
    _columns = {
        'hours_planned': fields.float('Planned Hours', readonly=True),
        'hours_effective': fields.float('Effective Hours', readonly=True),
        'hours_delay': fields.float('Avg. Plan.-Eff.', readonly=True),
        'remaining_hours': fields.float('Remaining Hours', readonly=True),
        'progress': fields.float('Progress', readonly=True, group_operator='avg'),
        'total_hours': fields.float('Total Hours', readonly=True),
    }
    def _select(self):
        # Append the timesheet columns to the parent report's SELECT clause.
        return super(report_project_task_user, self)._select() + ", progress as progress, t.effective_hours as hours_effective, remaining_hours as remaining_hours, total_hours as total_hours, t.delay_hours as hours_delay, planned_hours as hours_planned"
    def _group_by(self):
        # Every selected (non-aggregated) column must also be grouped on.
        return super(report_project_task_user, self)._group_by() + ", remaining_hours, t.effective_hours, progress, total_hours, planned_hours, hours_delay"
| gpl-3.0 |
resturp/Symilar | Window.py | 1 | 2443 | """module:: symilar/Window
:platform: Linix
:synopsis: Class for a sliding window over a string population winnow array.
.. moduleauthor:: Thomas Boose <thomas@boose.nl>
.. license:: Copyright 2014 Thomas Boose
thomas at boose dot nl.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Window(object):
    """Sliding window over a stream of chunk hashes, producing a winnowing
    fingerprint: for every window position (sampled each ``noise`` chunks)
    the minimal chunk value, tie-broken by the highest line number, is
    recorded once per change."""
    class WindowMustHavePositiveSize(Exception):
        """Raised when guarantee < noise (window size would be <= 0)."""
        pass
    class WindowMustHavePositiveNoiseSize(Exception):
        """Raised when noise <= 0."""
        pass
    def addChunk(self, chunk, linenumber):
        """Push (chunk, linenumber) into the circular window and, once the
        window is full, record the current minimum in the fingerprint."""
        self.chunks[self.pivot] = [chunk, linenumber]
        self.pivot = (self.pivot + 1) % self.windowsize
        if self.pivot == 0:
            self.initialized = True
        if self.initialized:
            if self.chunkcounter % self.noise == 0:
                # Scan for the smallest chunk in the window; on ties keep
                # the highest line number.  BUG FIX: the original seeded the
                # scan with the magic sentinel 'g000', which silently
                # produced a bogus ['g000', 0] entry whenever every chunk in
                # the window compared >= 'g000'.  Seeding from the window
                # itself works for arbitrary chunk strings.
                minchunk, maxline = self.chunks[0]
                for achunk in self.chunks:
                    if achunk[0] < minchunk:
                        minchunk, maxline = achunk
                    elif achunk[0] == minchunk and achunk[1] > maxline:
                        maxline = achunk[1]
                # Only record a minimum when it differs from the last one,
                # so repeated window positions collapse to one entry.
                if [minchunk, maxline] != self.lastMin:
                    self.fingerPrint.append([minchunk, maxline])
                    self.lastMin = [minchunk, maxline]
        self.chunkcounter += 1
    def __init__(self, guarantee=5, noise=1):
        """Create a window of size guarantee - noise + 1.

        :param guarantee: guarantee threshold; matches at least this many
            chunks long are always detected.
        :param noise: sampling interval; minima are only evaluated every
            ``noise`` chunks.  Must satisfy 0 < noise <= guarantee.
        """
        self.pivot = 0
        self.noise = noise
        if self.noise <= 0:
            raise self.WindowMustHavePositiveNoiseSize()
        self.guarantee = guarantee
        if guarantee < self.noise:
            raise self.WindowMustHavePositiveSize()
        self.windowsize = self.guarantee - self.noise + 1
        # One [chunk, line] slot per window position.  Slots are replaced
        # wholesale in addChunk, but build distinct lists anyway (the
        # original used [['', 0]] * n, aliasing one list n times).
        self.chunks = [['', 0] for _ in range(self.windowsize)]
        self.lastMin = ['', 0]
        self.fingerPrint = []
        self.chunkcounter = 0
        self.initialized = False
| apache-2.0 |
mozilla/stoneridge | python/src/Lib/encodings/shift_jisx0213.py | 816 | 1059 | #
# shift_jisx0213.py: Python Unicode Codec for SHIFT_JISX0213
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
codec = _codecs_jp.getcodec('shift_jisx0213')
# All classes below delegate to the module-level multibyte ``codec`` object
# obtained from _codecs_jp above.
class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo record the codec registry looks up by name."""
    return codecs.CodecInfo(
        name='shift_jisx0213',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mpl-2.0 |
vlinhd11/vlinhd11-android-scripting | python-build/python-libs/xmpppy/doc/examples/logger.py | 87 | 2608 | #!/usr/bin/python
# -*- coding: koi8-r -*-
from xmpp import *
import time,os
#BOT=(botjid,password)
BOT=('test@penza-gsm.ru','test')
#CONF=(confjid,password)
CONF=('talks@conference.jabber.ru','')
LOGDIR='./'
PROXY={}
#PROXY={'host':'192.168.0.1','port':3128,'username':'luchs','password':'secret'}
#######################################
def LOG(stanza,nick,text):
    """Append one HTML table row for (nick, text) to the conference log.

    Log files are grouped per conference and month under LOGDIR, one file
    per day; the folder and file (with an HTML header) are created on
    first use.
    """
    ts=stanza.getTimestamp()
    if not ts:
        # Stanza carried no delay timestamp: stamp it with "now".
        ts=stanza.setTimestamp()
        ts=stanza.getTimestamp()
    # Convert the UTC stanza timestamp into display time.
    # NOTE(review): the +3600*3 shift hard-codes UTC+3 — confirm intended TZ.
    tp=time.mktime(time.strptime(ts,'%Y%m%dT%H:%M:%S %Z'))+3600*3
    # Compensate for daylight saving time when active.
    if time.localtime()[-1]: tp+=3600
    tp=time.localtime(tp)
    # Folder name: conference JID with '@' escaped, plus year.month.
    fold=stanza.getFrom().getStripped().replace('@','%')+'_'+time.strftime("%Y.%m",tp)
    day=time.strftime("%d",tp)
    tm=time.strftime("%H:%M:%S",tp)
    # Best effort: the folder usually already exists.
    try: os.mkdir(LOGDIR+fold)
    except: pass
    fName='%s%s/%s.%s.html'%(LOGDIR,fold,fold,day)
    # Create the day's file with an HTML header if it does not exist yet.
    try: open(fName)
    except:
        open(fName,'w').write("""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xml:lang="ru-RU" lang="ru-RU" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type" />
<title>%s logs for %s.%s.</title>
</head>
<body>
<table border="1"><tr><th>time</th><th>who</th><th>text</th></tr>
"""%(CONF[0],fold,day))
    text='<pre>%s</pre>'%text
    open(fName,'a').write((u"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\n"%(tm,nick,text)).encode('utf-8'))
    # Echo the same row to the console (koi8-r terminal, Python 2 print).
    print (u"<tr><td>%s</td><td>%s</td><td>%s</td></tr>\n"%(tm,nick,text)).encode('koi8-r','replace')
#    print time.localtime(tp),nick,text
def messageCB(sess,mess):
    # Log every groupchat message body under the sender's room nick.
    nick=mess.getFrom().getResource()
    text=mess.getBody()
    LOG(mess,nick,text)
# Nicks currently present in the conference room.
roster=[]
def presenceCB(sess,pres):
    """Track joins/leaves via presence and log them (koi8-r phrases)."""
    nick=pres.getFrom().getResource()
    text=''
    if pres.getType()=='unavailable':
        # Participant left the room.
        if nick in roster:
            text=nick+unicode(' ÐÏËÉÎÕÌ ËÏÎÆÅÒÅÎÃÉÀ','koi8-r')
            roster.remove(nick)
    else:
        # Any other presence from an unknown nick means a join.
        if nick not in roster:
            text=nick+unicode(' ÐÒÉÛ£Ì × ËÏÎÆÅÒÅÎÃÉÀ','koi8-r')
            roster.append(nick)
    if text: LOG(pres,nick,text)
# Script body: connect, register handlers, join the room and loop forever.
if 1:
    cl=Client(JID(BOT[0]).getDomain(),debug=[])
    cl.connect(proxy=PROXY)
    cl.RegisterHandler('message',messageCB)
    cl.RegisterHandler('presence',presenceCB)
    cl.auth(JID(BOT[0]).getNode(),BOT[1])
    # Join the MUC room with resource "logger"; request no history so old
    # messages are not logged twice.
    p=Presence(to='%s/logger'%CONF[0])
    p.setTag('x',namespace=NS_MUC).setTagData('password',CONF[1])
    p.getTag('x').addChild('history',{'maxchars':'0','maxstanzas':'0'})
    cl.send(p)
    while 1:
        cl.Process(1)
| apache-2.0 |
anbangr/trusted-juju | juju/providers/ec2/__init__.py | 1 | 9518 | import os
import re
from twisted.internet.defer import inlineCallbacks, returnValue
from txaws.ec2.exception import EC2Error
from txaws.service import AWSServiceRegion
from txaws.util import parse as parse_url
from .utils import ssl
from juju.errors import (
MachinesNotFound, ProviderError, ProviderInteractionError)
from juju.providers.common.base import MachineProviderBase
from .files import FileStorage
from .launch import EC2LaunchMachine
from .machine import EC2ProviderMachine, machine_from_instance
from .securitygroup import (
open_provider_port, close_provider_port, get_provider_opened_ports,
remove_security_groups, destroy_environment_security_group)
from .utils import get_region_uri, log
from .utils import (
convert_zone, get_region_uri, DEFAULT_REGION, INSTANCE_TYPES, log)
class MachineProvider(MachineProviderBase):
    """MachineProvider for use in an EC2/S3 environment"""
    def __init__(self, environment_name, config):
        """Build EC2/S3 clients from ``config``, warning about any API
        endpoint that is not both encrypted and hostname-verified."""
        super(MachineProvider, self).__init__(environment_name, config)
        if not config.get("ec2-uri"):
            ec2_uri = get_region_uri(config.get("region", DEFAULT_REGION))
        else:
            ec2_uri = config.get("ec2-uri")
        self._service = AWSServiceRegion(
            access_key=config.get("access-key", ""),
            secret_key=config.get("secret-key", ""),
            ec2_uri=ec2_uri,
            s3_uri=config.get("s3-uri", ""))
        ssl_verify = self.config.get("ssl-hostname-verification", False)
        # ``ssl`` is None when txaws.client.ssl could not be imported.
        if ssl and ssl_verify:
            self._service.ec2_endpoint.ssl_hostname_verification = True
            self._service.s3_endpoint.ssl_hostname_verification = True
        elif ssl:
            log.warn('ssl-hostname-verification is disabled for this environment')
        else:
            log.warn('txaws.client.ssl unavailable for SSL hostname verification')
            ssl_verify = False
        for endpoint, endpoint_type in [(self._service.ec2_endpoint,'EC2'),
                                        (self._service.s3_endpoint,'S3')]:
            if endpoint.scheme != 'https':
                log.warn('%s API calls not using secure transport' % endpoint_type)
            elif not ssl_verify:
                log.warn('%s API calls encrypted but not authenticated' % endpoint_type)
        if not ssl_verify:
            log.warn('Ubuntu Cloud Image lookups encrypted but not authenticated')
        self.s3 = self._service.get_s3_client()
        self.ec2 = self._service.get_ec2_client()
    @property
    def provider_type(self):
        # Identifies this provider implementation to the rest of juju.
        return "ec2"
    @property
    def using_amazon(self):
        # A custom ec2-uri implies a private (non-AWS) cloud.
        return "ec2-uri" not in self.config
    @inlineCallbacks
    def get_constraint_set(self):
        """Return the set of constraints that are valid for this provider."""
        cs = yield super(MachineProvider, self).get_constraint_set()
        if 1:  # These keys still need to be valid (instance-type and ec2-zone)
            #if self.using_amazon:
            # Expose EC2 instance types/zones on AWS itelf, not private clouds.
            cs.register_generics(INSTANCE_TYPES.keys())
            cs.register("ec2-zone", converter=convert_zone)
        returnValue(cs)
    def get_legacy_config_keys(self):
        """Return any deprecated config keys that are set"""
        legacy = super(MachineProvider, self).get_legacy_config_keys()
        if self.using_amazon:
            # In the absence of a generic instance-type/image-id mechanism,
            # these keys remain valid on private clouds.
            amazon_legacy = set(("default-image-id", "default-instance-type"))
            legacy.update(amazon_legacy.intersection(self.config))
        return legacy
    def get_serialization_data(self):
        """Get provider configuration suitable for serialization.

        Also extracts credential information from the environment.
        """
        data = super(MachineProvider, self).get_serialization_data()
        data.setdefault("access-key", os.environ.get("AWS_ACCESS_KEY_ID"))
        data.setdefault("secret-key", os.environ.get("AWS_SECRET_ACCESS_KEY"))
        return data
    def get_file_storage(self):
        """Retrieve an S3-backed :class:`FileStorage`."""
        return FileStorage(self.s3, self.config["control-bucket"])
    def start_machine(self, machine_data, master=False):
        """Start an EC2 machine.

        :param dict machine_data: desired characteristics of the new machine;
            it must include a "machine-id" key, and may include a "constraints"
            key to specify the underlying OS and hardware.

        :param bool master: if True, machine will initialize the juju admin
            and run a provisioning agent, in addition to running a machine
            agent.
        """
        return EC2LaunchMachine.launch(self, machine_data, master)
    @inlineCallbacks
    def get_machines(self, instance_ids=()):
        """List machines running in the provider.

        :param list instance_ids: ids of instances you want to get. Leave empty
            to list every
            :class:`juju.providers.ec2.machine.EC2ProviderMachine` owned by
            this provider.

        :return: a list of
            :class:`juju.providers.ec2.machine.EC2ProviderMachine`
            instances
        :rtype: :class:`twisted.internet.defer.Deferred`

        :raises: :exc:`juju.errors.MachinesNotFound`
        """
        group_name = "juju-%s" % self.environment_name
        try:
            instances = yield self.ec2.describe_instances(*instance_ids)
        except EC2Error as error:
            code = error.get_error_codes()
            message = error.get_error_messages()
            if code == "InvalidInstanceID.NotFound":
                message = error.get_error_messages()
                # Surface the missing ids parsed out of the error text.
                raise MachinesNotFound(
                    re.findall(r"\bi-[0-9a-f]{3,15}\b", message))
            raise ProviderInteractionError(
                "Unexpected EC2Error getting machines %s: %s"
                % (", ".join(instance_ids), message))
        machines = []
        for instance in instances:
            # Only consider live instances belonging to this environment's
            # security group.
            if instance.instance_state not in ("running", "pending"):
                continue
            if group_name not in instance.reservation.groups:
                continue
            machines.append(machine_from_instance(instance))
        if instance_ids:
            # We were asked for a specific list of machines, and if we can't
            # completely fulfil that request we should blow up.
            found_instance_ids = set(m.instance_id for m in machines)
            missing = set(instance_ids) - found_instance_ids
            if missing:
                raise MachinesNotFound(missing)
        returnValue(machines)
    @inlineCallbacks
    def destroy_environment(self):
        """Terminate all associated machines and security groups.

        The super defintion of this method terminates each machine in
        the environment; this needs to be augmented here by also
        removing the security group for the environment.

        :rtype: :class:`twisted.internet.defer.Deferred`
        """
        try:
            killed_machines = yield super(MachineProvider, self).\
                destroy_environment()
            returnValue(killed_machines)
        finally:
            # Always attempt group cleanup, even if termination failed.
            yield destroy_environment_security_group(self)
    @inlineCallbacks
    def shutdown_machines(self, machines):
        """Terminate machines associated with this provider.

        :param machines: machines to shut down
        :type machines: list of
            :class:`juju.providers.ec2.machine.EC2ProviderMachine`

        :return: list of terminated
            :class:`juju.providers.ec2.machine.EC2ProviderMachine`
            instances
        :rtype: :class:`twisted.internet.defer.Deferred`
        """
        if not machines:
            returnValue([])
        for machine in machines:
            if not isinstance(machine, EC2ProviderMachine):
                raise ProviderError("Can only shut down EC2ProviderMachines; "
                                    "got a %r" % type(machine))
        ids = [m.instance_id for m in machines]
        # Re-query so we only act on machines that still exist.
        killable_machines = yield self.get_machines(ids)
        if not killable_machines:
            returnValue([])  # Nothing to do
        killable_ids = [m.instance_id for m in killable_machines]
        terminated = yield self.ec2.terminate_instances(*killable_ids)
        # Pass on what was actually terminated, in the case the
        # machine has somehow disappeared since get_machines
        # above. This is to avoid getting EC2Error: Error Message:
        # Invalid id when running ec2.describe_instances in
        # remove_security_groups
        terminated_ids = [info[0] for info in terminated]
        yield remove_security_groups(self, terminated_ids)
        returnValue(killable_machines)
    def open_port(self, machine, machine_id, port, protocol="tcp"):
        """Authorizes `port` using `protocol` on EC2 for `machine`."""
        return open_provider_port(self, machine, machine_id, port, protocol)
    def close_port(self, machine, machine_id, port, protocol="tcp"):
        """Revokes `port` using `protocol` on EC2 for `machine`."""
        return close_provider_port(self, machine, machine_id, port, protocol)
    def get_opened_ports(self, machine, machine_id):
        """Returns a set of open (port, proto) pairs for `machine`."""
        return get_provider_opened_ports(self, machine, machine_id)
ajayapra/shoebox-spybot | catkin_ws/src/object_tracking/src/scripts/joy_start.py | 1 | 2361 | #!/usr/bin/env python
# Use joystick input to launch object-tracking nodes in jackal
#
# Intro to Robotics - EE5900 - Spring 2017
# Assignment #6
#
# Project #6 Group #2
# Prithvi
# Aswin
# Akhil (Team Lead)
#
# version: v1.3
# define imports
import rospy
import roslaunch
import sys
import time
import os
from sensor_msgs.msg import Joy
# class to read joystick messages and launch node
# class to read joystick messages and launch node
class joy_control(object):
    """Maps joystick buttons to starting/stopping the object-tracker node."""
    def __init__(self):
        """Subscribe to the joystick topic and run the start/stop loop
        until ROS shuts down."""
        # define subscriber
        rospy.Subscriber("/bluetooth_teleop/joy", Joy, self.joy_callback)
        rate = rospy.Rate(5)
        rospy.loginfo('started joystick routine..')
        # flags set by joy_callback, consumed (and reset) by the loop below
        self.start = False
        self.stop = False
        # configure node roslaunch api
        package = 'object_tracking'
        executable = 'tracker_proto.py'
        node = roslaunch.core.Node(package, executable)
        # BUG FIX: the original referenced ``process`` unconditionally on
        # stop, crashing with UnboundLocalError if the stop button was
        # pressed before any start; it also relaunched on every start
        # press, losing the handle of an already-running process.
        process = None  # handle of the running tracker node, if any
        while not rospy.is_shutdown():
            # if start flag set and nothing running: launch the node
            if self.start and process is None:
                launch = roslaunch.scriptapi.ROSLaunch()
                launch.start()
                process = launch.launch(node)
            # if stop flag set and something running: shut it down
            if self.stop and process is not None:
                process.stop()
                process = None
            # reset triggers
            self.start = False
            self.stop = False
            rate.sleep()
    # joystick callback routine
    def joy_callback(self, data):
        """Set start/stop flags from PS4 controller button presses."""
        # define joystick buttons
        x, circ, sq, tri, L1, R1, share, options, p4, L3, R3, DL, DR, DU, DD = data.buttons
        llr, lud, L2, rlr, rud, R2 = data.axes
        # Circle: start object tracking
        if (circ == 1) and (self.start == False):
            rospy.loginfo("Starting the object tracking routine...")
            # set the start flag
            self.start = True
        # X: stop tracking
        if (x == 1):
            rospy.loginfo("Terminating the object tracking routine...")
            # set stop flag
            self.stop = True
# standard boilerplate: initialise the ROS node and enter the blocking
# joy_control constructor loop; a ROSInterruptException means shutdown.
if __name__ == "__main__":
    try:
        rospy.init_node("joy_start", anonymous=False)
        # read in joystick input
        run = joy_control()
    except rospy.ROSInterruptException:
        rospy.loginfo("joy_start node terminated.")
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/twisted/doc/core/howto/listings/pb/copy_sender.py | 18 | 1497 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb, jelly
from twisted.python import log
from twisted.internet import reactor
class LilyPond:
    """Plain value holder (color + frog count); Python 2 code."""

    def setStuff(self, color, numFrogs):
        # Plain attribute assignment; these values are what pb.Copyable
        # serializes in the CopyPond subclass below.
        self.color = color
        self.numFrogs = numFrogs

    def countFrogs(self):
        print "%d frogs" % self.numFrogs
class CopyPond(LilyPond, pb.Copyable):
    # Mixing in pb.Copyable makes instances of LilyPond serializable by
    # Perspective Broker so they can be passed through callRemote.
    pass
class Sender:
    """Sends a pond to the remote root object and reports the outcome."""

    def __init__(self, pond):
        self.pond = pond

    def got_obj(self, remote):
        # Called with the server's root object; push the pond across.
        self.remote = remote
        d = remote.callRemote("takePond", self.pond)
        d.addCallback(self.ok).addErrback(self.notOk)

    def ok(self, response):
        print "pond arrived", response
        reactor.stop()

    def notOk(self, failure):
        # InsecureJelly means the server refused to deserialize our class.
        print "error during takePond:"
        if failure.type == jelly.InsecureJelly:
            print " InsecureJelly"
        else:
            print failure
        reactor.stop()
        return None
def main():
    """Connect to the PB server on localhost:8800 and send it a pond."""
    from copy_sender import CopyPond  # so it's not __main__.CopyPond
    pond = CopyPond()
    pond.setStuff("green", 7)
    pond.countFrogs()
    # class name:
    print ".".join([pond.__class__.__module__, pond.__class__.__name__])
    sender = Sender(pond)
    factory = pb.PBClientFactory()
    reactor.connectTCP("localhost", 8800, factory)
    deferred = factory.getRootObject()
    deferred.addCallback(sender.got_obj)
    reactor.run()
# Run the client when executed as a script.
if __name__ == '__main__':
    main()
| agpl-3.0 |
GrimDerp/httpie | tests/test_stream.py | 49 | 1828 | import pytest
from httpie.compat import is_windows
from httpie.output.streams import BINARY_SUPPRESSED_NOTICE
from utils import http, TestEnvironment
from fixtures import BIN_FILE_CONTENT, BIN_FILE_PATH
class TestStream:
    """--stream behaviour with redirected and/or prettified output."""
    # GET because httpbin 500s with binary POST body.

    @pytest.mark.skipif(is_windows,
                        reason='Pretty redirect not supported under Windows')
    def test_pretty_redirected_stream(self, httpbin):
        """Test that --stream works with prettified redirected output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            # Colored output but non-tty stdout: the "pretty redirect" case.
            env = TestEnvironment(colors=256, stdin=f,
                                  stdin_isatty=False,
                                  stdout_isatty=False)
            r = http('--verbose', '--pretty=all', '--stream', 'GET',
                     httpbin.url + '/get', env=env)
        # Binary body must be suppressed, not dumped to the terminal.
        assert BINARY_SUPPRESSED_NOTICE.decode() in r

    def test_encoded_stream(self, httpbin):
        """Test that --stream works with non-prettified
        redirected terminal output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            env = TestEnvironment(stdin=f, stdin_isatty=False)
            r = http('--pretty=none', '--stream', '--verbose', 'GET',
                     httpbin.url + '/get', env=env)
        assert BINARY_SUPPRESSED_NOTICE.decode() in r

    def test_redirected_stream(self, httpbin):
        """Test that --stream works with non-prettified
        redirected terminal output."""
        with open(BIN_FILE_PATH, 'rb') as f:
            env = TestEnvironment(stdout_isatty=False,
                                  stdin_isatty=False,
                                  stdin=f)
            r = http('--pretty=none', '--stream', '--verbose', 'GET',
                     httpbin.url + '/get', env=env)
        # Fully redirected: raw binary content passes through untouched.
        assert BIN_FILE_CONTENT in r
| bsd-3-clause |
unindented/streamcode | client/static/jsrepl/extern/python/closured/lib/python2.7/email/quoprimime.py | 246 | 10848 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: email-sig@python.org
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64mime module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.header module.
"""
# Public API of this module.
__all__ = [
    'body_decode',
    'body_encode',
    'body_quopri_check',
    'body_quopri_len',
    'decode',
    'decodestring',
    'encode',
    'encodestring',
    'header_decode',
    'header_encode',
    'header_quopri_check',
    'header_quopri_len',
    'quote',
    'unquote',
    ]

import re

from string import hexdigits
from email.utils import fix_eols

# Canonical MIME line terminator, and a plain newline.
CRLF = '\r\n'
NL = '\n'

# See also Charset.py
# Fixed overhead of the RFC 2047 chrome around an encoded word:
# '=?' + '?q?' + '?=' is 7 characters (used by header_encode below).
MISC_LEN = 7

# Characters that need escaping in RFC 2047 'Q'-encoded headers
# (anything outside the letters/digits/-!*+/ and space safe set).
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
# Characters that need escaping in quoted-printable bodies
# (anything outside printable ASCII-minus-'=' and tab).
bqre = re.compile(r'[^ !-<>-~\t]')
# Helpers
def header_quopri_check(c):
    """Return True if the character should be escaped with header quopri."""
    match = hqre.match(c)
    return match is not None
def body_quopri_check(c):
    """Return True if the character should be escaped with body quopri."""
    match = bqre.match(c)
    return match is not None
def header_quopri_len(s):
    """Return the length of str when it is encoded with header quopri."""
    # Characters needing escape expand to '=XX' (3 chars); the rest are 1.
    return sum(3 if hqre.match(c) else 1 for c in s)
def body_quopri_len(str):
    """Return the length of str when it is encoded with body quopri."""
    # (Parameter name shadows the builtin but is kept for API compatibility.)
    # Characters needing escape expand to '=XX' (3 chars); the rest are 1.
    return sum(3 if bqre.match(c) else 1 for c in str)
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    # s[1] is the high hex nibble, s[2] the low one.
    return chr(int(s[1], 16) * 16 + int(s[2], 16))
def quote(c):
    """Return the quoted-printable '=XX' escape for a single character."""
    return '={0:02X}'.format(ord(c))
def header_encode(header, charset="iso-8859-1", keep_eols=False,
                  maxlinelen=76, eol=NL):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use to encode the header.  It defaults
    to iso-8859-1.

    The resulting string will be in the form:

    "=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
      =?charset?q?Silly_=C8nglish_Kn=EEghts?="

    with each line wrapped safely at, at most, maxlinelen characters (defaults
    to 76 characters).  If maxlinelen is None, the entire string is encoded in
    one chunk with no splitting.

    End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
    to the canonical email line separator \\r\\n unless the keep_eols
    parameter is True (the default is False).

    Each line of the header will be terminated in the value of eol, which
    defaults to "\\n".  Set this to "\\r\\n" if you are using the result of
    this function directly in email.
    """
    # Return empty headers unchanged
    if not header:
        return header
    if not keep_eols:
        header = fix_eols(header)
    # Quopri encode each line, in encoded chunks no greater than maxlinelen in
    # length, after the RFC chrome is added in.
    quoted = []
    if maxlinelen is None:
        # An obnoxiously large number that's good enough
        max_encoded = 100000
    else:
        # Room left per chunk once the '=?charset?q?...?=' chrome (MISC_LEN)
        # and the continuation space are accounted for.
        max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
    for c in header:
        # Space may be represented as _ instead of =20 for readability
        if c == ' ':
            _max_append(quoted, '_', max_encoded)
        # These characters can be included verbatim
        elif not hqre.match(c):
            _max_append(quoted, c, max_encoded)
        # Otherwise, replace with hex value like =E2
        else:
            _max_append(quoted, "=%02X" % ord(c), max_encoded)
    # Now add the RFC chrome to each encoded chunk and glue the chunks
    # together.  BAW: should we be able to specify the leading whitespace in
    # the joiner?
    joiner = eol + ' '
    return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
def encode(body, binary=False, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    If binary is False (the default), end-of-line characters will be converted
    to the canonical email end-of-line sequence \\r\\n.  Otherwise they will
    be left verbatim.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters (defaults to
    76 characters).  Long lines will have the `soft linefeed' quoted-printable
    character "=" appended to them, so the decoded text will be identical to
    the original text.
    """
    if not body:
        return body
    if not binary:
        body = fix_eols(body)
    # BAW: We're accumulating the body text by string concatenation.  That
    # can't be very efficient, but I don't have time now to rewrite it.  It
    # just feels like this algorithm could be more efficient.
    encoded_body = ''
    lineno = -1
    # Preserve line endings here so we can check later to see an eol needs to
    # be added to the output later.
    lines = body.splitlines(1)
    for line in lines:
        # But strip off line-endings for processing this line.
        if line.endswith(CRLF):
            line = line[:-2]
        elif line[-1] in CRLF:
            line = line[:-1]
        lineno += 1
        encoded_line = ''
        prev = None
        linelen = len(line)
        # Now we need to examine every character to see if it needs to be
        # quopri encoded.  BAW: again, string concatenation is inefficient.
        for j in range(linelen):
            c = line[j]
            prev = c
            if bqre.match(c):
                c = quote(c)
            elif j+1 == linelen:
                # Check for whitespace at end of line; special case
                if c not in ' \t':
                    encoded_line += c
                prev = c
                continue
            # Check to see to see if the line has reached its maximum length
            if len(encoded_line) + len(c) >= maxlinelen:
                # Emit a soft line break ('=' + eol) and start a new line.
                encoded_body += encoded_line + '=' + eol
                encoded_line = ''
            encoded_line += c
        # Now at end of line..
        if prev and prev in ' \t':
            # Special case for whitespace at end of file
            if lineno + 1 == len(lines):
                # Trailing whitespace on the very last line must be escaped.
                prev = quote(prev)
                if len(encoded_line) + len(prev) > maxlinelen:
                    encoded_body += encoded_line + '=' + eol + prev
                else:
                    encoded_body += encoded_line + prev
            # Just normal whitespace at end of line
            else:
                # Protect it with a soft line break so it survives transport.
                encoded_body += encoded_line + prev + '=' + eol
            encoded_line = ''
        # Now look at the line we just finished and it has a line ending, we
        # need to add eol to the end of the line.
        if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
            encoded_body += encoded_line + eol
        else:
            encoded_body += encoded_line
        encoded_line = ''
    return encoded_body
# For convenience and backwards compatibility w/ standard base64 module,
# expose encode() under the historical names as well.
body_encode = encode
encodestring = encode
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.  '=XX' escapes are
    expanded, a trailing '=' is treated as a soft line break (line
    continuation), and malformed escapes are passed through literally.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''
    for line in encoded.splitlines():
        line = line.rstrip()
        if not line:
            decoded += eol
            continue
        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1
            # Reached the (hard) end of this input line: emit a line break.
            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    if not encoded.endswith(eol) and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
# For convenience and backwards compatibility w/ standard base64 module,
# expose decode() under the historical names as well.
body_decode = decode
decodestring = decode
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    # re.sub callback: the whole match is the '=XX' escape sequence.
    return unquote(match.group(0))
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # 'Q' encoding writes spaces as underscores; restore those first, then
    # expand every '=XX' hex escape in a single regex pass.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s.replace('_', ' '))
| mit |
ganeshrn/ansible | lib/ansible/module_utils/distro/__init__.py | 15 | 2026 | # (c) 2018 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat distro library.
'''
# The following makes it easier for us to script updates of the bundled code
_BUNDLED_METADATA = {"pypi_name": "distro", "version": "1.5.0"}

# The following additional changes have been made:
# * Remove optparse since it is not needed for our use.
# * A format string including {} has been changed to {0} (py2.6 compat)
# * Port two calls from subprocess.check_output to subprocess.Popen().communicate() (py2.6 compat)

import sys
import types

try:
    import distro as _system_distro
except ImportError:
    # No system-wide 'distro' package installed; fall back to bundled copy.
    _system_distro = None
else:
    # There could be a 'distro' package/module that isn't what we expect, on the
    # PYTHONPATH.  Rather than erroring out in this case, just fall back to ours.
    # We require more functions than distro.id(), but this is probably a decent
    # test that we have something we can reasonably use.
    if not hasattr(_system_distro, 'id') or \
            not isinstance(_system_distro.id, types.FunctionType):
        _system_distro = None

if _system_distro:
    distro = _system_distro
else:
    # Our bundled copy
    from ansible.module_utils.distro import _distro as distro
    # Alias the submodule in sys.modules so later imports of
    # 'ansible.module_utils.distro' resolve to the bundled implementation.
    sys.modules['ansible.module_utils.distro'] = distro
| gpl-3.0 |
muccg/rdrf | rdrf/rdrf/scripts/data_dictionary_generator.py | 1 | 13263 | # encoding: utf-8
import yaml
import sys
from string import strip
import codecs
def decode(l):
    # Decode each byte string in l to unicode (Python 2: str -> unicode).
    return map(lambda s: s.decode('utf-8'), l)
RANGE_DELIMITER = "|"
# COUNTRIES = decode(["Afghanistan", "Åland Islands", "Albania", "Algeria", "American Samoa", "Andorra", "Angola", "Anguilla", "Antarctica", "Antigua and Barbuda", "Argentina", "Armenia", "Aruba", "Australia", "Austria", "Azerbaijan", "Bahamas", "Bahrain", "Bangladesh", "Barbados", "Belarus", "Belgium", "Belize", "Benin", "Bermuda", "Bhutan", "Bolivia, Plurinational State of", "Bonaire, Sint Eustatius and Saba", "Bosnia and Herzegovina", "Botswana", "Bouvet Island", "Brazil", "British Indian Ocean Territory", "Brunei Darussalam", "Bulgaria", "Burkina Faso", "Burundi", "Cambodia", "Cameroon", "Canada", "Cape Verde", "Cayman Islands", "Central African Republic", "Chad", "Chile", "China", "Christmas Island", "Cocos (Keeling) Islands", "Colombia", "Comoros", "Congo", "Congo, The Democratic Republic of the", "Cook Islands", "Costa Rica", "Côte d'Ivoire", "Croatia", "Cuba", "Curaçao", "Cyprus", "Czech Republic", "Denmark", "Djibouti", "Dominica", "Dominican Republic", "Ecuador", "Egypt", "El Salvador", "Equatorial Guinea", "Eritrea", "Estonia", "Ethiopia", "Falkland Islands (Malvinas)", "Faroe Islands", "Fiji", "Finland", "France", "French Guiana", "French Polynesia", "French Southern Territories", "Gabon", "Gambia", "Georgia", "Germany", "Ghana", "Gibraltar", "Greece", "Greenland", "Grenada", "Guadeloupe", "Guam", "Guatemala", "Guernsey", "Guinea", "Guinea-Bissau", "Guyana", "Haiti", "Heard Island and McDonald Islands", "Holy See (Vatican City State)", "Honduras", "Hong Kong", "Hungary", "Iceland", "India", "Indonesia", "Iran, Islamic Republic of", "Iraq", "Ireland", "Isle of Man", "Israel", "Italy", "Jamaica", "Japan", "Jersey", "Jordan", "Kazakhstan", "Kenya", "Kiribati", "Korea, Democratic People's Republic of", "Korea, Republic of", "Kuwait", "Kyrgyzstan", "Lao People's Democratic Republic", "Latvia", "Lebanon", "Lesotho", "Liberia", "Libya", "Liechtenstein", "Lithuania", "Luxembourg",
# "Macao", "Macedonia, Republic of", "Madagascar", "Malawi", "Malaysia", "Maldives", "Mali", "Malta", "Marshall Islands", "Martinique", "Mauritania", "Mauritius", "Mayotte", "Mexico", "Micronesia, Federated States of", "Moldova, Republic of", "Monaco", "Mongolia", "Montenegro", "Montserrat", "Morocco", "Mozambique", "Myanmar", "Namibia", "Nauru", "Nepal", "Netherlands", "New Caledonia", "New Zealand", "Nicaragua", "Niger", "Nigeria", "Niue", "Norfolk Island", "Northern Mariana Islands", "Norway", "Oman", "Pakistan", "Palau", "Palestine, State of", "Panama", "Papua New Guinea", "Paraguay", "Peru", "Philippines", "Pitcairn", "Poland", "Portugal", "Puerto Rico", "Qatar", "Réunion", "Romania", "Russian Federation", "Rwanda", "Saint Barthélemy", "Saint Helena, Ascension and Tristan da Cunha", "Saint Kitts and Nevis", "Saint Lucia", "Saint Martin (French part)", "Saint Pierre and Miquelon", "Saint Vincent and the Grenadines", "Samoa", "San Marino", "Sao Tome and Principe", "Saudi Arabia", "Senegal", "Serbia", "Seychelles", "Sierra Leone", "Singapore", "Sint Maarten (Dutch part)", "Slovakia", "Slovenia", "Solomon Islands", "Somalia", "South Africa", "South Georgia and the South Sandwich Islands", "Spain", "Sri Lanka", "Sudan", "Suriname", "South Sudan", "Svalbard and Jan Mayen", "Swaziland", "Sweden", "Switzerland", "Syrian Arab Republic", "Taiwan, Province of China", "Tajikistan", "Tanzania, United Republic of", "Thailand", "Timor-Leste", "Togo", "Tokelau", "Tonga", "Trinidad and Tobago", "Tunisia", "Turkey", "Turkmenistan", "Turks and Caicos Islands", "Tuvalu", "Uganda", "Ukraine", "United Arab Emirates", "United Kingdom", "United States", "United States Minor Outlying Islands", "Uruguay", "Uzbekistan", "Vanuatu", "Venezuela, Bolivarian Republic of", "Viet Nam", "Virgin Islands, British", "Virgin Islands, U.S.", "Wallis and Futuna", "Western Sahara", "Yemen", "Zambia", "Zimbabwe"])
# Country list intentionally left empty; the full reference list is kept in
# the commented-out block above.
COUNTRIES = []

# Ethnicity choices offered for the "Ethnic Origin" demographic field.
ETHNICITIES = [
    "New Zealand European",
    "Australian",
    "Other Caucasian/European",
    "Aboriginal",
    "Person from the Torres Strait Islands",
    "Maori",
    "NZ European / Maori",
    "Samoan",
    "Cook Islands Maori",
    "Tongan",
    "Niuean",
    "Tokelauan",
    "Fijian",
    "Other Pacific Peoples",
    "Southeast Asian",
    "Chinese",
    "Indian",
    "Other Asian",
    "Middle Eastern",
    "Latin American",
    "Black African/African American",
    "Other Ethnicity",
    "Decline to Answer"]

# Remaining RANGE choice lists used by the demographic/consent fields below.
SEXES = ["Male", "Female", "Indeterminate"]
LIVING_STATUSES = ["Living", "Deceased"]
AUS_STATES = ["ACT", "NSW", "NT", "QLD", "SA", "TAS", "VIC", "WA"]
BOOLEAN = ["True", "False"]
class DemographicForm:
    # Section titles used when emitting the demographics part of the report.
    SECTION_REGISTRY = "Registry"
    SECTION_PATIENT_DETAILS = "Patients Personal Details"
    HOME_ADDRESS = "Home Address"
    PEDIGREE = "Pedigree"
class DemographicField(object):
    """One demographics row of the generated data dictionary.

    Mirrors the attribute interface of CDEWrapper (name/section/datatype/
    required/members/validation) so both can be emitted by the same report
    iteration code.
    """

    def __init__(
            self,
            section,
            name,
            datatype="STRING",
            members=None,
            validation="",
            required=False):
        # BUG FIX: the original signature used a mutable default argument
        # (members=[]), which is created once and shared between every call.
        # Use None as the sentinel and allocate a fresh list per instance;
        # behaviour for callers passing a list (or nothing) is unchanged.
        self.name = name
        self.section = section
        self._members = [] if members is None else members
        self.validation = validation
        self.datatype = datatype
        # The report emits the required flag as the strings "True"/"False".
        self.required = str(required)
        if self.datatype == "DATE":
            # Date fields always carry a fixed format hint as validation.
            self.validation = "dd/mm/yyyy"

    @property
    def members(self):
        """Allowed values joined with the range delimiter for RANGE fields,
        empty string for every other datatype."""
        if self.datatype == "RANGE":
            return RANGE_DELIMITER.join(decode(self._members))
        return ""
class CDEWrapper(object):
    """Read-only view over one CDE definition dict from the registry YAML.

    `data` is the whole parsed YAML document (needed to resolve permitted
    value groups); `cde_dict` is the individual CDE entry.
    """

    def __init__(self, data, cde_dict):
        self.data = data
        self.cde_dict = cde_dict

    @property
    def name(self):
        return self.cde_dict["name"]

    @property
    def required(self):
        # Emitted as the strings "True"/"False" in the report.
        return str(self.cde_dict["is_required"])

    @property
    def datatype(self):
        # Normalise e.g. " range " -> "RANGE".
        return self.cde_dict["datatype"].strip().upper()

    @property
    def members(self):
        """Allowed display values joined with the range delimiter, or ''."""
        if self.datatype != "RANGE":
            return ""
        return RANGE_DELIMITER.join(self._get_allowed_values())

    @property
    def validation(self):
        """Human-readable validation rules derived from the CDE definition."""
        rules = []
        kind = self.datatype
        if kind == "STRING":
            if self.cde_dict["max_length"]:
                rules.append("Length <= %s" % self.cde_dict["max_length"])
            if self.cde_dict["pattern"]:
                rules.append("Must conform to regular expression %s" %
                             self.cde_dict["pattern"])
        elif kind == "INTEGER":
            if self.cde_dict["min_value"]:
                rules.append("Minimum value = %s" % self.cde_dict["min_value"])
            if self.cde_dict["max_value"]:
                rules.append("Maximum value = %s" % self.cde_dict["max_value"])
        return ",".join(rules)

    def _get_allowed_values(self):
        """Resolve this CDE's permitted value group to its display values."""
        pvg_code = self.cde_dict["pv_group"]
        if not pvg_code:
            return []
        for pvg in self.data["pvgs"]:
            if pvg["code"] == pvg_code:
                # Display values only; the stored codes are not reported.
                return [pv["value"] for pv in pvg["values"]]
        return []
class DataDefinitionReport(object):
    """Writes a tab-separated data dictionary describing every field.

    Iterating over an instance yields one row (tuple of strings) per field:
    a patient-ID placeholder, the demographics fields, the consent fields,
    and then every CDE of every non-repeating section of every form.
    """

    def __init__(self, data, stream):
        self.data = data          # parsed registry YAML document
        self.stream = stream      # output stream (expects unicode text)
        self.current_line = []    # columns accumulated for the next row
        self.line_num = 1
        self.delimiter = "\t"     # TSV output

    def write_column(self, value):
        self.current_line.append(value)

    def new_line(self):
        # Flush the accumulated columns as one tab-separated line.
        print("writing line: %s" % self.current_line)
        # encoded = map(lambda s : s.decode('utf-8'), self.current_line)
        line = self.delimiter.join(self.current_line)
        line = line + "\n"
        self.stream.write(line)
        self.current_line = []
        self.line_num += 1

    def write_values(self, *values):
        # Convenience: write a whole row in one call.
        for value in values:
            self.write_column(value)
        self.new_line()

    def write_header(self):
        self.write_values("FIELDNUM", "FORM", "SECTION", "CDE",
                          "DATATYPE", "REQUIRED", "ALLOWED VALUES", "VALIDATION")

    def _get_cdes_from_section(self, section_dict):
        # Resolve each (whitespace-stripped) CDE code of the section to a
        # wrapped CDE definition.
        cdes = []
        cde_codes = map(strip, section_dict["elements"])
        for cde_code in cde_codes:
            cde_dict = self._get_cde_dict(cde_code)
            cde = CDEWrapper(self.data, cde_dict)
            cdes.append(cde)
        return cdes

    def _get_cde_dict(self, cde_code):
        # Linear search over all CDE definitions; returns None when the
        # code is unknown.
        for cde_dict in self.data["cdes"]:
            if cde_dict["code"] == cde_code:
                return cde_dict

    def _get_demographic_fields(self):
        # Hard-coded description of the built-in demographics model.
        fields = []
        fields.append(DemographicField(
            DemographicForm.SECTION_REGISTRY, "Centre", required=True))
        # fields.append(DemographicField(DemographicForm.SECTION_REGISTRY, "Clinician"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Family name", required=True))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Given names", required=True))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Maiden name"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Hospital/Clinic ID"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Date of birth", "DATE", required=True))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Country of birth", "RANGE", COUNTRIES))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Ethnic Origin", "RANGE", ETHNICITIES))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Sex", "RANGE", SEXES, required=True))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Home Phone"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Mobile Phone"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Work Phone"))
        fields.append(DemographicField(
            DemographicForm.SECTION_PATIENT_DETAILS, "Email"))
        fields.append(
            DemographicField(
                DemographicForm.SECTION_PATIENT_DETAILS,
                "Living status",
                "RANGE",
                LIVING_STATUSES,
                required=True))
        fields.append(DemographicField(
            DemographicForm.HOME_ADDRESS, "Address"))
        fields.append(DemographicField(
            DemographicForm.HOME_ADDRESS, "Suburb/Town"))
        fields.append(DemographicField(
            DemographicForm.HOME_ADDRESS, "State", "RANGE", AUS_STATES))
        fields.append(DemographicField(
            DemographicForm.HOME_ADDRESS, "Postcode"))
        fields.append(DemographicField(
            DemographicForm.HOME_ADDRESS, "Country", "RANGE", COUNTRIES))
        return fields

    def _get_consent_fields(self):
        # Consent questions are all boolean RANGE fields.
        fields = []

        def mk_consent(sec, field, required=False):
            fields.append(DemographicField(sec, field, "RANGE", BOOLEAN, required=required))

        mk_consent("FH Registry Consent", "Adult Consent")
        mk_consent("FH Registry Consent", "Child Consent")
        mk_consent("FH Optional Consents", "Clinical Trials")
        mk_consent("FH Optional Consents", "Information")
        mk_consent("FH Registry Subset", "FCHL")
        mk_consent("FH Registry Subset", "Hyper-Lp(a)")
        return fields

    def __iter__(self):
        col = 1
        # first column is the oroginal patient ID in their system
        yield "1", "NA", "NA", "YOURPATIENTID", "NA", "True", "", ""
        col += 1
        for demographic_field in self._get_demographic_fields():
            yield str(col), "DEMOGRAPHICS", demographic_field.section, demographic_field.name, demographic_field.datatype, demographic_field.required, demographic_field.members, demographic_field.validation
            col += 1
        for field in self._get_consent_fields():
            yield str(col), "CONSENTS", field.section, field.name, field.datatype, field.required, field.members, field.validation
            col += 1
        # Finally, the CDEs of every non-repeating section of every form,
        # skipping the FollowUp form entirely.
        for form_dict in self.data["forms"]:
            if form_dict["name"] == "FollowUp":
                continue
            for section_dict in form_dict["sections"]:
                if not section_dict["allow_multiple"]:
                    cdes = self._get_cdes_from_section(section_dict)
                    for cde in cdes:
                        yield str(col), form_dict["name"], section_dict["display_name"], cde.name, cde.datatype, cde.required, cde.members, cde.validation
                        col += 1
# Command line: data_dictionary_generator.py <registry-yaml> <output-tsv>
yaml_file = sys.argv[1]
output_file = sys.argv[2]

with open(yaml_file) as yf:
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary objects; acceptable only because the registry YAML is
    # trusted input.
    data = yaml.load(yf)

# Write the report as UTF-8 text: header row first, then one row per field.
f = codecs.open(output_file, mode="w", encoding="utf-8")
ddr = DataDefinitionReport(data, f)
ddr.write_header()
for items in ddr:
    ddr.write_values(*items)
| agpl-3.0 |
dev-zero/cp2k-tools | cp2k_tools/generator/cli.py | 1 | 1081 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=8 sw=4 tw=0 :
# Copyright (c) 2017 Tiziano Müller <tiziano.mueller@chem.uzh.ch>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import click
from cp2k_tools.generator import dict2cp2k
@click.command()
@click.argument('cp2k-json-file', type=click.File('r'))
def cli(cp2k_json_file):
    """Convert a CP2K JSON input to a native CP2K input file"""
    # Parse the JSON document and render it through the dict -> CP2K
    # converter, echoing the generated input file to stdout.
    struct = json.load(cp2k_json_file)
    click.echo(dict2cp2k(struct))
| apache-2.0 |
roidelapluie/Lexpage | app/profile/forms.py | 2 | 5553 | from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, SetPasswordForm
from commons.widgets import DateTimePicker
from django import forms
from .models import Profile
from captcha.fields import ReCaptchaField
import datetime
# Editable fields split by the model they live on: Django's User model
# vs. the local Profile model (combined in ChangeProfileForm.Meta below).
user_fields = ['first_name', 'last_name', 'email']
profile_fields = ['gender', 'birthdate', 'country', 'city', 'website_name', 'website_url', 'theme', 'avatar']
class ActivationForm(forms.Form):
    """Form asking for the account activation key."""
    key = forms.CharField(required=True, label='Clé d\'activation')
class MyPasswordChangeForm(PasswordChangeForm):
    # Override only to localise the label and enforce a minimum length of 8
    # on the new password.
    new_password1 = forms.CharField(widget=forms.PasswordInput,
                                    required=True,
                                    label='Nouveau mot de passe',
                                    min_length=8)
class MyPasswordSetForm(SetPasswordForm):
    # Same override for the "set password" flow: localised label and a
    # minimum length of 8 on the new password.
    new_password1 = forms.CharField(widget=forms.PasswordInput,
                                    required=True,
                                    label='Nouveau mot de passe',
                                    min_length=8)
class ChangeProfileForm(forms.ModelForm):
    """Profile edition form combining User fields and Profile fields."""
    first_name = forms.CharField(max_length=30, required=False, label='Prénom')
    last_name = forms.CharField(max_length=30, required=False, label='Nom',
                                help_text='Cette information ne sera pas affichée publiquement.')
    email = forms.EmailField(label='Adresse e-mail', required=True,
                             help_text='Cette information ne sera pas affichée publiquement.')
    birthdate = forms.DateField(required=False, label='Date de naissance',
                                widget=DateTimePicker(options={'format': 'DD/MM/YYYY', 'pickTime': False}))
    avatar_file = forms.FileField(required=False, label='Envoyer un avatar depuis votre disque',
                                  help_text='Vous pouvez envoyer un avatar depuis votre disque. Il doit s\'agir d\'un fichier image, en .gif, .jpg ou .png. Votre ancien avatar sera automatiquement effacé et remplacé par le nouvel avatar.')

    class Meta:
        model = Profile
        fields = user_fields + profile_fields + ['avatar_file']

    def clean_avatar_file(self):
        """Validate extension and size of an uploaded avatar.

        Side effect: stores the accepted extension in
        cleaned_data['avatar_file_ext'] for later use.
        """
        in_file = self.cleaned_data['avatar_file']
        if not in_file:
            return  # No file, no clean ^^
        try:
            extension = in_file.name.rsplit('.', 1)[-1]
        except IndexError:
            # NOTE(review): rsplit never returns an empty list, so this
            # except branch appears unreachable — confirm before removing.
            extension = None
        # NOTE(review): comparison is case-sensitive; '.JPG' is rejected.
        if not(extension in ['jpg', 'jpeg', 'gif', 'png']):
            raise forms.ValidationError('Les seules extensions autorisées sont .jpg, .jpeg, .png et .gif.')
        else:
            self.cleaned_data['avatar_file_ext'] = extension
        # Reject files larger than 512 KiB.
        if in_file.size > 512 * 1024:
            raise forms.ValidationError('L\'image doit être inférieure à 512ko.')
        return self.cleaned_data['avatar_file']

    def clean_birthdate(self):
        """Reject birthdates implying an age under ~5 or over ~100 years.

        NOTE(review): timedelta(365*N) ignores leap years, so the bounds
        are approximate by design.
        """
        if self.cleaned_data['birthdate']:
            if (self.cleaned_data['birthdate'] > datetime.date.today() - datetime.timedelta(365*5)) \
                    or (self.cleaned_data['birthdate'] < datetime.date.today() - datetime.timedelta(365*100)):
                raise forms.ValidationError('La date choisie n\'est pas cohérente.')
        return self.cleaned_data['birthdate']
class LoginForm(AuthenticationForm):
    """Login form extended with "remember me" and incognito options."""
    remember_me = forms.BooleanField(initial=True, required=False, label='Rester connecté')
    incognito = forms.BooleanField(initial=False, required=False, label='Mode incognito',
                                   help_text='Cacher ma présence dans la liste des connectés.')
class RegistrationForm(forms.Form):
    """Account creation form with uniqueness checks and a captcha."""
    username = forms.RegexField(regex=r'^[\w.@+_-]+$',
                                min_length=3,
                                max_length=20,
                                required=True,
                                label='Nom d\'utilisateur',
                                error_messages={'invalid': 'Ce champ ne peut contenir que des lettres, des nombres et les caractères @/./+/-/_.'})
    email = forms.EmailField(label='E-mail',
                             required=True)
    password1 = forms.CharField(widget=forms.PasswordInput,
                                required=True,
                                label='Mot de passe',
                                min_length=8)
    password2 = forms.CharField(widget=forms.PasswordInput,
                                required=True,
                                label='Confirmation')
    captcha = ReCaptchaField(attrs={'theme': 'clean', 'lang': 'fr'})

    def __init__(self, *args, **kwargs):
        # NOTE(review): this override is a no-op (only calls super) and
        # could be removed.
        super(RegistrationForm, self).__init__(*args, **kwargs)

    def clean_username(self):
        # Case-insensitive uniqueness check against existing users.
        existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
        if existing.exists():
            raise forms.ValidationError('Ce nom d\'utilisateur est déjà utilisé.')
        else:
            return self.cleaned_data['username']

    def clean_email(self):
        # Case-insensitive uniqueness check on the email address.
        if User.objects.filter(email__iexact=self.cleaned_data['email']):
            raise forms.ValidationError('Cette adresse e-mail est déjà utilisée.')
        return self.cleaned_data['email']

    def clean(self):
        # Cross-field validation: the two password entries must match.
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError('Les mots de passe ne correspondent pas.')
        return self.cleaned_data
| gpl-3.0 |
chris-allan/openmicroscopy | components/tools/registry/whois.py | 4 | 4213 | #!/usr/bin/env python
#
# OMERO Registry Whois Database
# Copyright 2007 Glencoe Software, Inc. All Rights Reserved.
#
try:
from pysqlite2 import dbapi2 as sqlite
except:
from sqlite3 import dbapi2 as sqlite
from pprint import pprint
import os, sys, exceptions
class whoisdb:
    """SQLite-backed cache mapping IP addresses to whois information.

    Python 2 module (``print`` statements, ``dict.has_key``). Most commands
    read their input from stdin, one record per line; the record format
    differs per command (see each method).
    """

    def __create__(self, dbname='whois.db'):
        # Not a real dunder: helper that creates the schema on first use.
        conn = sqlite.connect(dbname)
        try:
            cursor = conn.cursor()
            cursor.execute('CREATE TABLE whois (id varchar(15) PRIMARY KEY, whois text)')
            conn.commit()
        finally:
            conn.close()

    def __init__(self, dbname='whois.db'):
        # Create the database file lazily, then keep one connection open.
        if not os.path.exists(dbname):
            self.__create__(dbname)
        self.conn = sqlite.connect(dbname)

    def __close__(self):
        # Not a real dunder either; invoked explicitly (see __main__ below).
        self.conn.close()

    def __iter__(self):
        # NOTE: 'iter' here resolves to the module-level cursor-wrapper class
        # defined below, which shadows the builtin iter().
        return iter(self.conn)

    def __values__(self):
        # All whois values (without their ids) as a list.
        c = self.conn.cursor()
        c.execute('SELECT whois FROM whois')
        return [ value[0] for value in c ]

    def get(self, ip):
        # Return the (id, whois) row for ip, or None when absent.
        c = self.conn.execute('SELECT id, whois FROM whois WHERE id = ?', (ip,))
        rv = c.fetchone()
        c.close()
        return rv

    def set(self, ip, whois, commit = None):
        # Insert a new row; raises on a duplicate primary key.
        c = self.conn.cursor()
        c.execute('INSERT INTO whois VALUES (?,?)', (ip, whois))
        if commit:
            self.conn.commit()

    def update(self, ip, whois, commit = None):
        # Overwrite the whois value of an existing row.
        c = self.conn.cursor()
        c.execute('UPDATE whois SET whois = ? WHERE id = ?', (whois, ip))
        if commit:
            self.conn.commit()

    def missing(self):
        # Print ips from stdin that are not yet in the database, skipping
        # private 10.x.x.x addresses.
        for line in sys.stdin:
            line = line.strip()
            parts = line.split()
            if self.get(parts[0]) == None:
                if not parts[0].startswith("10."):
                    print parts[0]

    def lookup(self):
        # Reverse-DNS each ip read from stdin; tolerates a trailing comma.
        from socket import gethostbyaddr as ghba
        from socket import herror
        for line in sys.stdin:
            line = line.strip()
            parts = line.split()
            ip = parts[0]
            if ip.endswith(","):
                ip = ip[:-1]
            try:
                print "%s\t" % ip,
                print ghba(ip)[0]
            except herror:
                print "Error"

    def load(self):
        # Bulk-load tab-separated "ip<TAB>whois" lines from stdin, warning
        # about conflicting duplicates and skipping empty ("") values.
        seen = {}
        for line in sys.stdin:
            line = line.strip()
            parts = line.split("\t")
            if not parts[1] == "\"\"":
                if seen.has_key(parts[0]):
                    print "Already seen %s (%s) new value %s" % (parts[0],seen[parts[0]], parts[1])
                else:
                    seen[parts[0]] = parts[1]
                    try:
                        self.set(parts[0], parts[1])
                    except exceptions.Exception, e:
                        print "Failed to insert %s (%s)" % (parts[0], e)
        self.conn.commit()

    def values(self):
        # Print every stored whois value.
        for each in self.__values__():
            print each

    def report(self, level = "2", filter = "0"):
        # Group values by their last `level` dot-separated components and
        # print groups whose count exceeds `filter`. Defaults are strings
        # because arguments arrive verbatim from the command line.
        lvl = int(level)
        flt = int(filter)
        fmt = ".".join( [ "%s" for i in range(lvl) ] )
        all = {}
        for each in self.__values__():
            parts = each.split(".")
            used = []
            for i in range(lvl):
                try:
                    used.append( parts[-1 * (i+1)] )
                except IndexError:
                    # Shorter values are padded with a space component.
                    used.append(" ")
            key = fmt % tuple(used)
            if not all.has_key(key):
                all[key] = 1
            else:
                all[key] += 1
        for k,v in all.items():
            if v > flt:
                print "%-64s\t%s" % (k,v)

    def correct(self):
        # Normalize values stored as "<host> Resolves correctly" down to
        # just the hostname.
        ips = set()
        for i in self:
            if 0 <= i[1].find("Resolves correctly"):
                ips.add(i[0])
        for ip in ips:
            t = self.get(ip)
            m = t[1].split()
            if m[2] != "correctly" or m[1] != "Resolves":
                print "Failed"
            else:
                self.update(ip, m[0])
        self.conn.commit()

    def lower(self):
        # Lower-case every stored whois value.
        for i in self:
            self.update(i[0],i[1].lower())
        self.conn.commit()
class iter:
    """Cursor-backed iterator over all (id, whois) rows.

    Deliberately named ``iter``: it shadows the builtin so that
    ``whoisdb.__iter__``'s ``iter(self.conn)`` call resolves to this class.
    """
    def __init__(self, conn):
        self.cursor = conn.cursor()
        self.cursor.execute('SELECT id, whois FROM whois')
    def __iter__(self):
        # Hand out the raw cursor, which is itself iterable.
        return self.cursor
    def next(self):
        # Python 2 iterator protocol (next, not __next__).
        return self.cursor.next()
if __name__ == "__main__":
    # With no arguments: dump the whole table. Otherwise the first argument
    # names a whoisdb method and the remaining ones are passed as strings.
    db = whoisdb()
    try:
        if len(sys.argv) == 1:
            for ip in db:
                print "%-15s\t%s" % (ip[0],ip[1])
        else:
            arg = list(sys.argv[1:])
            cmd = arg.pop(0)
            getattr(db, cmd)(*arg)
    finally:
        db.__close__()
| gpl-2.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/django/core/management/utils.py | 405 | 2590 | from __future__ import unicode_literals
import os
import sys
from subprocess import PIPE, Popen
from django.utils import six
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_text
from .base import CommandError
def popen_wrapper(args, os_err_exc_type=CommandError, universal_newlines=True):
    """
    Friendly wrapper around Popen.

    Returns stdout output, stderr output and OS status code.
    """
    try:
        process = Popen(
            args, shell=False, stdout=PIPE, stderr=PIPE,
            close_fds=os.name != 'nt', universal_newlines=universal_newlines)
    except OSError as e:
        strerror = force_text(e.strerror, DEFAULT_LOCALE_ENCODING,
                              strings_only=True)
        # Re-raise as the requested exception type, preserving the traceback.
        six.reraise(
            os_err_exc_type,
            os_err_exc_type('Error executing %s: %s' % (args[0], strerror)),
            sys.exc_info()[2])
    stdout, stderr = process.communicate()
    decoded_errors = force_text(stderr, DEFAULT_LOCALE_ENCODING,
                                strings_only=True)
    return stdout, decoded_errors, process.returncode
def handle_extensions(extensions):
    """
    Organizes multiple extensions that are separated with commas or passed by
    using --extension/-e multiple times.

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension list: ['.js', '.txt', '.xhtml']

    Each entry may contain several comma-separated extensions; spaces are
    stripped and a leading dot is added when missing. The result is a set,
    so duplicates collapse.
    """
    normalized = set()
    for chunk in extensions:
        for ext in chunk.replace(' ', '').split(','):
            normalized.add(ext if ext.startswith('.') else '.%s' % ext)
    return normalized
def find_command(cmd, path=None, pathext=None):
    """Return the full path to *cmd* searching *path*, or None if not found.

    On Windows, also tries the executable extensions from PATHEXT unless the
    command already ends with one of them.
    """
    if path is None:
        path = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(path, six.string_types):
        path = [path]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # don't use extensions if the command ends with one of them
    if any(cmd.endswith(ext) for ext in pathext):
        pathext = ['']
    # check if we find the command on PATH, bare name first, then with
    # each extension appended
    for directory in path:
        base = os.path.join(directory, cmd)
        for candidate in [base] + [base + ext for ext in pathext]:
            if os.path.isfile(candidate):
                return candidate
    return None
| mit |
henryfjordan/django | django/contrib/auth/migrations/0001_initial.py | 200 | 5063 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.auth.models
from django.core import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial django.contrib.auth migration: Permission, Group and User."""

    dependencies = [
        ('contenttypes', '__first__'),
    ]

    operations = [
        # Permission: (content_type, codename) pairs grantable to users/groups.
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('content_type', models.ForeignKey(
                    to='contenttypes.ContentType',
                    on_delete=models.CASCADE,
                    to_field='id',
                    verbose_name='content type',
                )),
                ('codename', models.CharField(max_length=100, verbose_name='codename')),
            ],
            options={
                'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
                'unique_together': set([('content_type', 'codename')]),
                'verbose_name': 'permission',
                'verbose_name_plural': 'permissions',
            },
            managers=[
                ('objects', django.contrib.auth.models.PermissionManager()),
            ],
        ),
        # Group: named collection of permissions.
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
                ('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
            ],
            options={
                'verbose_name': 'group',
                'verbose_name_plural': 'groups',
            },
            managers=[
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
        # User: the default swappable auth user model.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(
                    default=False,
                    help_text='Designates that this user has all permissions without explicitly assigning them.',
                    verbose_name='superuser status'
                )),
                ('username', models.CharField(
                    help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True,
                    max_length=30, verbose_name='username',
                    validators=[validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')]
                )),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(
                    default=False, help_text='Designates whether the user can log into this admin site.',
                    verbose_name='staff status'
                )),
                ('is_active', models.BooleanField(
                    default=True, verbose_name='active', help_text=(
                        'Designates whether this user should be treated as active. Unselect this instead of deleting '
                        'accounts.'
                    )
                )),
                ('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(
                    to='auth.Group', verbose_name='groups', blank=True, related_name='user_set',
                    related_query_name='user', help_text=(
                        'The groups this user belongs to. A user will get all permissions granted to each of their '
                        'groups.'
                    )
                )),
                ('user_permissions', models.ManyToManyField(
                    to='auth.Permission', verbose_name='user permissions', blank=True,
                    help_text='Specific permissions for this user.', related_name='user_set',
                    related_query_name='user')
                ),
            ],
            options={
                'swappable': 'AUTH_USER_MODEL',
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| bsd-3-clause |
ShassAro/ShassAro | Bl_project/blVirtualEnv/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/schema.py | 36 | 3619 | from django.db.backends.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """PostgreSQL-specific schema editing: sequences and *_pattern_ops indexes."""

    sql_create_sequence = "CREATE SEQUENCE %(sequence)s"
    sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
    sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s"

    sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s"
    sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s"

    def quote_value(self, value):
        # Inner import so backend fails nicely if it's not present
        import psycopg2
        return psycopg2.extensions.adapt(value)

    def _model_indexes_sql(self, model):
        # Extend the base index SQL with extra pattern-ops indexes for
        # varchar/text columns (needed for LIKE queries, see below).
        output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return output
        for field in model._meta.local_fields:
            db_type = field.db_type(connection=self.connection)
            if db_type is not None and (field.db_index or field.unique):
                # Fields with database column types of `varchar` and `text` need
                # a second index that specifies their operator class, which is
                # needed when performing correct LIKE queries outside the
                # C locale. See #12234.
                if db_type.startswith('varchar'):
                    output.append(self._create_index_sql(
                        model, [field], suffix='_like', sql=self.sql_create_varchar_index))
                elif db_type.startswith('text'):
                    output.append(self._create_index_sql(
                        model, [field], suffix='_like', sql=self.sql_create_text_index))
        return output

    def _alter_column_type_sql(self, table, column, type):
        """
        Makes ALTER TYPE with SERIAL make sense.

        Returns the base-class result unless the target type is "serial", in
        which case the column becomes integer and its backing sequence is
        dropped, recreated, wired up as the column default, and reset to the
        current MAX of the column.
        """
        if type.lower() == "serial":
            sequence_name = "%s_%s_seq" % (table, column)
            return (
                (
                    self.sql_alter_column_type % {
                        "column": self.quote_name(column),
                        "type": "integer",
                    },
                    [],
                ),
                [
                    (
                        self.sql_delete_sequence % {
                            "sequence": sequence_name,
                        },
                        [],
                    ),
                    (
                        self.sql_create_sequence % {
                            "sequence": sequence_name,
                        },
                        [],
                    ),
                    (
                        self.sql_alter_column % {
                            "table": table,
                            "changes": self.sql_alter_column_default % {
                                "column": column,
                                "default": "nextval('%s')" % sequence_name,
                            }
                        },
                        [],
                    ),
                    (
                        self.sql_set_sequence_max % {
                            "table": table,
                            "column": column,
                            "sequence": sequence_name,
                        },
                        [],
                    ),
                ],
            )
        else:
            return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, column, type)
| gpl-2.0 |
nens/raster-tools | raster_tools/groups.py | 1 | 5563 | # (c) Nelen & Schuurmans, see LICENSE.rst.
# -*- coding: utf-8 -*-
import logging
from osgeo import gdal
from osgeo import gdal_array
from osgeo import ogr
from osgeo import osr
import numpy as np
from raster_tools import datasets
from raster_tools import utils
logger = logging.getLogger(__name__)
class Meta(object):
    """Snapshot of a GDAL dataset's metadata, used for compatibility checks."""

    def __init__(self, dataset):
        band = dataset.GetRasterBand(1)
        data_type = band.DataType
        numpy_type = gdal_array.GDALTypeCodeToNumericTypeCode(data_type)

        # attributes that take part in the equality comparison
        self.width = dataset.RasterXSize
        self.height = dataset.RasterYSize
        self.data_type = data_type
        self.projection = dataset.GetProjection()
        self.geo_transform = dataset.GetGeoTransform()

        # attributes ignored by the comparison
        self.dtype = np.dtype(numpy_type)
        no_data_value = band.GetNoDataValue()
        self.no_data_value = numpy_type(no_data_value)

    def __eq__(self, other):
        # Compare the compared attributes as tuples in one shot.
        mine = (self.width, self.height, self.data_type,
                self.projection, self.geo_transform)
        theirs = (other.width, other.height, other.data_type,
                  other.projection, other.geo_transform)
        return mine == theirs
class Group(object):
    """
    A group of gdal rasters, automatically merges, and has a more pythonic
    interface.
    """
    def __init__(self, *datasets):
        # All member rasters must agree on size, type, projection and
        # geo transform (checked via Meta.__eq__); nodata values may differ.
        metas = [Meta(dataset) for dataset in datasets]
        meta = metas[0]
        if not all([meta == m for m in metas]):
            raise ValueError('Incompatible rasters.')

        self.dtype = meta.dtype
        self.width = meta.width
        self.height = meta.height
        self.projection = meta.projection
        self.no_data_value = meta.no_data_value
        self.geo_transform = utils.GeoTransform(meta.geo_transform)

        # Per-dataset nodata values, parallel to self.datasets.
        self.no_data_values = [m.no_data_value for m in metas]
        self.datasets = datasets

    def read(self, bounds, inflate=False):
        """
        Return numpy array.

        :param bounds: x1, y1, x2, y2 window in pixels, or an ogr geometry
        :param inflate: inflate envelope to grid, to make sure that
            the entire geometry is contained in resulting indices.

        If the bounds fall outside the dataset, the result is padded
        with no data values.
        """
        # find indices
        if isinstance(bounds, ogr.Geometry):
            x1, y1, x2, y2 = self.geo_transform.get_indices(bounds,
                                                            inflate=inflate)
        else:
            x1, y1, x2, y2 = bounds

        # overlapping bounds: clamp the requested window to the raster extent
        w, h = self.width, self.height
        p1 = min(w, max(0, x1))
        q1 = min(h, max(0, y1))
        p2 = min(w, max(0, x2))
        q2 = min(h, max(0, y2))

        # result array plus a view for what's actually inside datasets
        array = np.full((y2 - y1, x2 - x1), self.no_data_value, self.dtype)
        view = array[q1 - y1: q2 - y1, p1 - x1: p2 - x1]

        # Later datasets overwrite earlier ones where they have real data.
        kwargs = {'xoff': p1, 'yoff': q1, 'xsize': p2 - p1, 'ysize': q2 - q1}
        for dataset, no_data_value in zip(self.datasets, self.no_data_values):
            data = dataset.ReadAsArray(**kwargs)
            index = data != no_data_value
            view[index] = data[index]

        return array
class RGBWrapper(object):
    """
    A wrapper around GDAL RGB datasets for pythonic querying.
    """
    def __init__(self, dataset):
        self.dataset = dataset
        self.width = dataset.RasterXSize
        self.height = dataset.RasterYSize
        self.projection = dataset.GetProjection()
        self.geo_transform = utils.GeoTransform(dataset.GetGeoTransform())

    def get_mask(self, geometry, shape):
        # Rasterize the geometry into a boolean mask of the given shape,
        # repeated 3x along the band axis to match RGB data.
        # create an ogr datasource
        driver = ogr.GetDriverByName('Memory')
        source = driver.CreateDataSource('')
        sr = osr.SpatialReference(self.projection)
        layer = source.CreateLayer('', sr)
        defn = layer.GetLayerDefn()
        feature = ogr.Feature(defn)
        feature.SetGeometry(geometry)
        layer.CreateFeature(feature)

        # burn where data should be
        mask = np.zeros(shape, dtype='u1')
        geo_transform = self.geo_transform.shifted(geometry)
        kwargs = {'geo_transform': geo_transform,
                  'projection': self.projection}
        with datasets.Dataset(mask, **kwargs) as dataset:
            gdal.RasterizeLayer(dataset, (1,), layer, burn_values=(1,))
        return mask.astype('b1').repeat(3, axis=0)

    def read(self, geometry):
        """
        Return numpy array.

        bounds: x1, y1, x2, y2 pixel window specifcation, or an ogr geometry

        If the bounds fall outside the dataset, the result is padded
        with no data values.
        """
        # find indices
        x1, y1, x2, y2 = self.geo_transform.get_indices(geometry)

        # overlapping bounds: clamp the window to the raster extent
        w, h = self.width, self.height
        p1 = min(w, max(0, x1))
        q1 = min(h, max(0, y1))
        p2 = min(w, max(0, x2))
        q2 = min(h, max(0, y2))

        # result array plus a view for what's actually inside datasets
        data = np.full((3, y2 - y1, x2 - x1), 0, 'u1')
        view = data[:, q1 - y1: q2 - y1, p1 - x1: p2 - x1]

        # query the data and put it in the view
        kwargs = {'xoff': p1, 'yoff': q1, 'xsize': p2 - p1, 'ysize': q2 - q1}
        view[:] = self.dataset.ReadAsArray(**kwargs)

        # create a mask
        shape = (1,) + data.shape[1:]
        mask = self.get_mask(geometry=geometry, shape=shape)
        return data, mask
| gpl-3.0 |
anibali/pywebp | webp_build/builder.py | 1 | 1460 | import json
import platform
import tempfile
from os import path, getcwd
from cffi import FFI
from conans.client import conan_api
from importlib_resources import read_text
# Build-time script: resolves libwebp via Conan and configures a CFFI builder.
conan, _, _ = conan_api.ConanAPIV1.factory()

# Use Conan to install libwebp
with tempfile.TemporaryDirectory() as tmp_dir:
    conan.install(path=getcwd(), cwd=tmp_dir)
    with open(path.join(tmp_dir, 'conanbuildinfo.json'), 'r') as f:
        conan_info = json.load(f)

# Find header files and libraries in libwebp
extra_objects = []
include_dirs = []
libraries = []

for dep in conan_info['dependencies']:
    for lib_name in dep['libs']:
        # Static archive naming differs per platform.
        if platform.system() == 'Windows':
            lib_filename = '{}.lib'.format(lib_name)
        else:
            lib_filename = 'lib{}.a'.format(lib_name)
        for lib_path in dep['lib_paths']:
            candidate = path.join(lib_path, lib_filename)
            if path.isfile(candidate):
                # Link the static archive directly.
                extra_objects.append(candidate)
            else:
                # Fall back to linking by name.
                # NOTE(review): this appends once per lib_path that lacks the
                # file, so lib_name may appear multiple times — confirm intended.
                libraries.append(lib_name)
    for include_path in dep['include_paths']:
        include_dirs.append(include_path)

# Specify C sources to be build by CFFI
ffibuilder = FFI()
ffibuilder.set_source(
    '_webp',
    read_text('webp_build', 'source.c'),
    extra_objects=extra_objects,
    include_dirs=include_dirs,
    libraries=libraries,
)
ffibuilder.cdef(read_text('webp_build', 'cdef.h'))

if __name__ == '__main__':
    ffibuilder.compile(verbose=True)
| mit |
dgnorth/drift | drift/management/commands/ssh.py | 1 | 2926 | # -*- coding: utf-8 -*-
import sys
import os
import os.path
import subprocess
import boto3
from click import echo
from six import print_
from six.moves import input
from drift.utils import get_config
def get_options(parser):
    """Register this command's arguments on *parser* (one optional positional)."""
    parser.add_argument(
        "service",
        nargs='?',
        help="Service or deployable to connect to",
    )
def run_command(args):
    """SSH into a running EC2 instance of the given service on the current tier.

    With no service argument, lists the available deployables and returns.
    When several instances match, the user is prompted to pick one.
    """
    service = args.service
    conf = get_config()
    tier_name = conf.tier['tier_name']
    region = conf.tier['aws']['region']
    ssh_key_name = conf.tier['aws']['ssh_key']

    # Deployables registered for this tier, keyed by name.
    deployables = conf.table_store.get_table('deployables').find({"tier_name": tier_name})
    deployables = {depl["deployable_name"]: depl for depl in deployables}

    if service is None:
        echo("Select an instance to connect to:")
        for k in sorted(deployables.keys()):
            echo("   {}".format(k))
        return
    elif service not in deployables:
        # Warn but continue: the EC2 tag filter below may still match.
        echo("Warning! Service or deployable '{}' not one of {}.".format(service, ", ".join(deployables.keys())))

    ssh_key_file = os.path.expanduser('~/.ssh/{}.pem'.format(ssh_key_name))

    # Get IP address of any instance of this deployable.
    sess = boto3.session.Session(region_name=region)
    ec2 = sess.client("ec2")
    filters = [{"Name": "instance-state-name", "Values": ["running"]},
               {"Name": "tag:tier", "Values": [tier_name]},
               {"Name": "tag:service-name", "Values": [service]},
               ]
    echo("Getting a list of EC2's from AWS matching the following criteria:")
    for criteria in filters:
        echo("   {} = {}".format(criteria["Name"], criteria["Values"][0]))
    ret = ec2.describe_instances(Filters=filters)
    instances = []
    for res in ret["Reservations"]:
        instances += res["Instances"]

    if not instances:
        echo("No instance found which matches the criteria.")
        return

    echo("Instances:")
    inst = instances[0]
    for i, ins in enumerate(instances):
        # 'launched-by' tag is optional; fall back to "n/a".
        lb = [tag["Value"] for tag in ins["Tags"] if tag["Key"] == "launched-by"] or ["n/a"]
        echo("  {}: {} at {} launched by {} on {}".format(
            i + 1, ins["InstanceId"], ins["PrivateIpAddress"], lb[0], ins["LaunchTime"]))
    if len(instances) > 1:
        which = input("Select an instance to connect to (or press enter for first one): ")
        if which:
            inst = instances[int(which) - 1]
    else:
        echo("Only one instance available. Connecting to it immediately..")
    ip_address = inst["PrivateIpAddress"]

    cd_cmd = ""
    if service in deployables:
        # Drop the user into the service's config directory.
        cd_cmd = 'cd /etc/opt/{}; exec bash --login'.format(service)
    cmd = ["ssh", "ubuntu@{}".format(ip_address), "-i", ssh_key_file, "-t", cd_cmd]
    echo("\nSSH command: " + " ".join(cmd))
    p = subprocess.Popen(cmd)
    stdout, _ = p.communicate()
    if p.returncode != 0:
        if stdout:
            print_(stdout.decode())
        sys.exit(p.returncode)
| mit |
smilusingjavascript/blink | Tools/Scripts/webkitpy/common/message_pool.py | 61 | 12097 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module for handling messages and concurrency for run-webkit-tests
and test-webkitpy. This module follows the design for multiprocessing.Pool
and concurrency.futures.ProcessPoolExecutor, with the following differences:
* Tasks are executed in stateful subprocesses via objects that implement the
Worker interface - this allows the workers to share state across tasks.
* The pool provides an asynchronous event-handling interface so the caller
may receive events as tasks are processed.
If you don't need these features, use multiprocessing.Pool or concurrency.futures
intead.
"""
import cPickle
import logging
import multiprocessing
import Queue
import sys
import time
import traceback
from webkitpy.common.host import Host
from webkitpy.common.system import stack_utils
_log = logging.getLogger(__name__)
def get(caller, worker_factory, num_workers, host=None):
    """Returns an object that exposes a run() method that takes a list of test shards and runs them in parallel."""
    pool = _MessagePool(caller, worker_factory, num_workers, host)
    return pool
class _MessagePool(object):
    """Manages a set of _Worker processes (or one inline worker) and routes
    messages between them and the caller."""

    def __init__(self, caller, worker_factory, num_workers, host=None):
        self._caller = caller
        self._worker_factory = worker_factory
        self._num_workers = num_workers
        self._workers = []
        self._workers_stopped = set()
        self._host = host
        self._name = 'manager'
        # With a single worker everything runs in this process, so plain
        # thread-safe queues suffice; otherwise use multiprocessing queues.
        self._running_inline = (self._num_workers == 1)
        if self._running_inline:
            self._messages_to_worker = Queue.Queue()
            self._messages_to_manager = Queue.Queue()
        else:
            self._messages_to_worker = multiprocessing.Queue()
            self._messages_to_manager = multiprocessing.Queue()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._close()
        return False

    def run(self, shards):
        """Posts a list of messages to the pool and waits for them to complete."""
        for message in shards:
            self._messages_to_worker.put(_Message(self._name, message[0], message[1:], from_user=True, logs=()))
        # One 'stop' sentinel per worker so every worker eventually exits.
        for _ in xrange(self._num_workers):
            self._messages_to_worker.put(_Message(self._name, 'stop', message_args=(), from_user=False, logs=()))
        self.wait()

    def _start_workers(self):
        assert not self._workers
        self._workers_stopped = set()
        host = None
        # Only share the host object when it can cross the process boundary.
        if self._running_inline or self._can_pickle(self._host):
            host = self._host

        for worker_number in xrange(self._num_workers):
            worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
            self._workers.append(worker)
            worker.start()

    def _worker_log_level(self):
        # Most verbose (lowest) level configured on any root handler.
        log_level = logging.NOTSET
        for handler in logging.root.handlers:
            if handler.level != logging.NOTSET:
                if log_level == logging.NOTSET:
                    log_level = handler.level
                else:
                    log_level = min(log_level, handler.level)
        return log_level

    def wait(self):
        try:
            self._start_workers()
            if self._running_inline:
                self._workers[0].run()
                self._loop(block=False)
            else:
                self._loop(block=True)
        finally:
            self._close()

    def _close(self):
        for worker in self._workers:
            if worker.is_alive():
                worker.terminate()
                worker.join()
        self._workers = []
        if not self._running_inline:
            # FIXME: This is a hack to get multiprocessing to not log tracebacks during shutdown :(.
            multiprocessing.util._exiting = True
        if self._messages_to_worker:
            self._messages_to_worker.close()
            self._messages_to_worker = None
        if self._messages_to_manager:
            self._messages_to_manager.close()
            self._messages_to_manager = None

    def _log_messages(self, messages):
        # Replay worker-side log records through this process's handlers.
        for message in messages:
            logging.root.handle(message)

    def _handle_done(self, source):
        self._workers_stopped.add(source)

    @staticmethod
    def _handle_worker_exception(source, exception_type, exception_value, _):
        # Re-raise worker errors in the manager process.
        if exception_type == KeyboardInterrupt:
            raise exception_type(exception_value)
        raise WorkerException(str(exception_value))

    def _can_pickle(self, host):
        try:
            cPickle.dumps(host)
            return True
        except TypeError:
            return False

    def _loop(self, block):
        # Dispatch incoming messages until the queue drains (non-blocking
        # mode raises Queue.Empty) after all workers reported 'done'.
        try:
            while True:
                if len(self._workers_stopped) == len(self._workers):
                    block = False
                message = self._messages_to_manager.get(block)
                self._log_messages(message.logs)
                if message.from_user:
                    self._caller.handle(message.name, message.src, *message.args)
                    continue
                method = getattr(self, '_handle_' + message.name)
                assert method, 'bad message %s' % repr(message)
                method(message.src, *message.args)
        except Queue.Empty:
            pass
class WorkerException(BaseException):
    """Raised when we receive an unexpected/unknown exception from a worker."""
class _Message(object):
def __init__(self, src, message_name, message_args, from_user, logs):
self.src = src
self.name = message_name
self.args = message_args
self.from_user = from_user
self.logs = logs
def __repr__(self):
return '_Message(src=%s, name=%s, args=%s, from_user=%s, logs=%s)' % (self.src, self.name, self.args, self.from_user, self.logs)
class _Worker(multiprocessing.Process):
    """Subprocess (or inline object, when num_workers == 1) that pulls task
    messages off a queue and hands them to a user-supplied worker delegate."""

    def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
        super(_Worker, self).__init__()
        self.host = host
        self.worker_number = worker_number
        self.name = 'worker/%d' % worker_number
        self.log_messages = []
        self.log_level = log_level
        self._running = False
        self._running_inline = running_inline
        self._manager = manager

        self._messages_to_manager = messages_to_manager
        self._messages_to_worker = messages_to_worker
        self._worker = worker_factory(self)
        self._logger = None
        self._log_handler = None

    def terminate(self):
        if self._worker:
            if hasattr(self._worker, 'stop'):
                self._worker.stop()
            self._worker = None
        if self.is_alive():
            super(_Worker, self).terminate()

    def _close(self):
        if self._log_handler and self._logger:
            self._logger.removeHandler(self._log_handler)
        self._log_handler = None
        self._logger = None

    def start(self):
        # Inline mode never forks; the manager calls run() directly.
        if not self._running_inline:
            super(_Worker, self).start()

    def run(self):
        if not self.host:
            self.host = Host()
        if not self._running_inline:
            self._set_up_logging()

        worker = self._worker
        exception_msg = ""
        _log.debug("%s starting" % self.name)
        self._running = True

        try:
            if hasattr(worker, 'start'):
                worker.start()
            while self._running:
                message = self._messages_to_worker.get()
                if message.from_user:
                    worker.handle(message.name, message.src, *message.args)
                    self._yield_to_manager()
                else:
                    # Only 'stop' is a valid control message for workers.
                    assert message.name == 'stop', 'bad message %s' % repr(message)
                    break
            _log.debug("%s exiting" % self.name)
        except Queue.Empty:
            assert False, '%s: ran out of messages in worker queue.' % self.name
        except KeyboardInterrupt, e:
            self._raise(sys.exc_info())
        except Exception, e:
            self._raise(sys.exc_info())
        finally:
            try:
                if hasattr(worker, 'stop'):
                    worker.stop()
            finally:
                # Always tell the manager we're done, even after an error.
                self._post(name='done', args=(), from_user=False)
            self._close()

    def stop_running(self):
        self._running = False

    def post(self, name, *args):
        self._post(name, args, from_user=True)
        self._yield_to_manager()

    def _yield_to_manager(self):
        # Inline mode: give the manager a chance to drain its queue.
        if self._running_inline:
            self._manager._loop(block=False)

    def _post(self, name, args, from_user):
        # Attach (and clear) buffered log records to the outgoing message.
        log_messages = self.log_messages
        self.log_messages = []
        self._messages_to_manager.put(_Message(self.name, name, args, from_user, log_messages))

    def _raise(self, exc_info):
        exception_type, exception_value, exception_traceback = exc_info
        if self._running_inline:
            raise exception_type, exception_value, exception_traceback

        if exception_type == KeyboardInterrupt:
            _log.debug("%s: interrupted, exiting" % self.name)
            stack_utils.log_traceback(_log.debug, exception_traceback)
        else:
            _log.error("%s: %s('%s') raised:" % (self.name, exception_value.__class__.__name__, str(exception_value)))
            stack_utils.log_traceback(_log.error, exception_traceback)
        # Since tracebacks aren't picklable, send the extracted stack instead.
        stack = traceback.extract_tb(exception_traceback)
        self._post(name='worker_exception', args=(exception_type, exception_value, stack), from_user=False)

    def _set_up_logging(self):
        self._logger = logging.getLogger()

        # The unix multiprocessing implementation clones any log handlers into the child process,
        # so we remove them to avoid duplicate logging.
        for h in self._logger.handlers:
            self._logger.removeHandler(h)

        self._log_handler = _WorkerLogHandler(self)
        self._logger.addHandler(self._log_handler)
        self._logger.setLevel(self.log_level)
class _WorkerLogHandler(logging.Handler):
def __init__(self, worker):
logging.Handler.__init__(self)
self._worker = worker
self.setLevel(worker.log_level)
def emit(self, record):
self._worker.log_messages.append(record)
| bsd-3-clause |
ops-hero/trebuchet | trebuchet/lib/controller.py | 1 | 2427 | import itertools
from .package import get_packages
from .my_yaml import load_yaml_config, print_pretty, get_yaml_config
from .callbacks import do_web_callback, jsonfy_pkg
def _get_all_packages(project, architecture=None, options=None, version_options=None):
    """
    Generator of packages to be built.
    """
    # One group of packages per entry in the project's configuration file.
    groups = [
        get_packages(project.full_path,
                     config=config,
                     architecture=architecture,
                     options=options,
                     version_options=version_options)
        for config in get_yaml_config(project.config_file)
    ]

    # Flatten the groups and yield each package in turn.
    for package in itertools.chain.from_iterable(groups):
        yield package
def check_config(project):
    """Return the full package names declared by the project configuration."""
    names = []
    for package in _get_all_packages(project):
        names.append(package.full_package_name)
    return names
def build_app(project, prepare, package, version_options=None):
    """
    Locally build the application packages
    """
    pkg_list = []
    for pkg in _get_all_packages(project,
                                 architecture=prepare.architecture,
                                 options={"pip_options": prepare.pip_options},
                                 version_options=version_options):
        pkg.build(package.debs_path,
                  extra_description=prepare.extra_description)
        pkg_list.append(pkg.final_deb_name)
        print "Built: " + str(jsonfy_pkg(pkg))
        # Optionally notify an external service about each built package.
        if package.web_callback_url:
            do_web_callback(package.web_callback_url, pkg)
    print pkg_list
def develop_app(project, prepare, version_options=None):
    """
    Locally prepare the working copy to be built for the application packages.

    Runs the develop step for every configured package and prints the list
    of deb names that would be produced.
    """
    pkg_list = []
    for pkg in _get_all_packages(project,
                                 architecture=prepare.architecture,
                                 options={"pip_options": prepare.pip_options},
                                 version_options=version_options):
        pkg.develop(extra_description=prepare.extra_description)
        pkg_list.append(pkg.final_deb_name)
    # print() is identical to the old print statement here on Python 2 and
    # makes the module valid Python 3.
    print(pkg_list)
def print_build_details(project, versions):
    """
    Print some details related to the build: the configuration file in use,
    the project path, and the requested versions.
    """
    # print() calls behave identically to the old Python 2 print statements
    # for these single-argument forms, and are valid Python 3.
    print("config file to use: " + project.config_file)
    print("project to use: " + project.full_path)
    print(versions)
| gpl-3.0 |
davidcoallier/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/sgilink.py | 61 | 2219 | """SCons.Tool.sgilink
Tool-specific initialization for the SGI MIPSPro linker on SGI.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sgilink.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
import link
linkers = ['CC', 'cc']
def generate(env):
    """Add Builders and construction variables for MIPSPro to an Environment."""
    # Start from the generic link tool, then override for the SGI linker.
    link.generate(env)

    # Prefer a detected MIPSPro driver ('CC' or 'cc'); fall back to 'cc'.
    env['LINK'] = env.Detect(linkers) or 'cc'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')

    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    env.Append(LINKFLAGS=['$__RPATH'])
    # Note the trailing space: the rpath value is concatenated directly
    # after the '-rpath ' prefix by _concat below.
    env['RPATHPREFIX'] = '-rpath '
    env['RPATHSUFFIX'] = ''
    env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
    """Return a true value when one of the MIPSPro linkers ('CC'/'cc') is found."""
    return env.Detect(linkers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
sivertkh/gtrackcore | gtrackcore/core/Api.py | 1 | 6929 | import shutil, os, sys
import logging
from gtrackcore.input.adapters.TrackGenomeElementSource import TrackViewListGenomeElementSource
from gtrackcore.preprocess.PreProcessTracksJob import PreProcessTrackGESourceJob
from gtrackcore.track.hierarchy.ProcTrackNameSource import ProcTrackNameSource
from gtrackcore.track.hierarchy.ProcTrackOptions import ProcTrackOptions
def trackNameExists(genome, trackName):
    """
    Check if there exists a track <trackName> in genome <genome>.
    :param genome: String, name of genome
    :param trackName: String, name of track (list format; see _convertTrackName)
    :return: True if the track exists, False otherwise.
    """
    return ProcTrackOptions.isValidTrack(genome, trackName)
def importTrackFromTrackContents(trackContents, trackName):
    """
    Import an in-memory track into GTrackCore as a new track.

    :param trackContents: object exposing .genome.name, .trackViews and
        .allowOverlaps (presumably a gtrackcore TrackContents instance --
        confirm against callers)
    :param trackName: track name as a 'a:b:c' style string; converted to
        list format internally
    :return: None. Silently returns without importing if a track with the
        same name already exists for the genome.
    """
    genome = trackContents.genome.name
    trackName = _convertTrackName(trackName)

    logging.debug("Importing trackContent: Name: {0}, genome: {1}".format(
        trackName, genome))

    # No-op when the track is already present; the existing data is kept.
    if trackNameExists(genome, trackName):
        return

    geSource = TrackViewListGenomeElementSource(genome,
                                                trackContents.trackViews,
                                                trackName,
                                                allowOverlaps=trackContents.allowOverlaps)
    job = PreProcessTrackGESourceJob(genome, trackName, geSource)
    job.process()
def getAvailableGenomes():
    """
    Returns a list of available genomes.

    Hidden directory entries (names starting with '.') are skipped, matching
    the filtering done by listAvailableGenomes -- previously entries such as
    '.DS_Store' could be reported as genomes.
    :return: List of genome names
    """
    dirPath = _getDirPath()
    return [name for name in os.listdir(dirPath) if not name.startswith('.')]
def getAvailableTracks(genome):
    """
    Returns a list of available tracks for a given genome
    :param genome: Genome given
    :return: iterable of track names (a ProcTrackNameSource)
    """
    # NOTE(review): _getDirPath ignores its argument; this call appears to be
    # made only for its side effect of ensuring the data directory exists --
    # confirm before relying on it.
    _getDirPath(genome)
    return ProcTrackNameSource(genome)
def deleteTrack(genome, trackname):
    """
    Remove a track from GTrackCore. Not implemented yet.

    :param genome: String, name of genome
    :param trackname: String, name of track
    :raises NotImplementedError: always, until this is implemented
    """
    # TODO: Remove a track from GTrackCore.
    raise NotImplementedError("deleteTrack is not implemented yet")
# *** Old API ****
def importFile(fileName, genome, trackName):
    """fileName genome trackName"""
    # Copy the file into the 'orig' area for the track, replacing any
    # previous original wholesale, then preprocess it.
    trackName = _convertTrackName(trackName)
    from gtrackcore.util.CommonFunctions import createOrigPath, ensurePathExists
    origFn = createOrigPath(genome, trackName, os.path.basename(fileName))
    if os.path.exists(origFn):
        shutil.rmtree(os.path.dirname(origFn))
    ensurePathExists(origFn)
    shutil.copy(fileName, origFn)
    # rw-rw-r--. The 0o spelling has the same value as the old 0664 literal,
    # is valid on Python 2.6+, and is the only form accepted by Python 3.
    os.chmod(origFn, 0o664)

    from gtrackcore.preprocess.PreProcessTracksJob import PreProcessAllTracksJob
    PreProcessAllTracksJob(genome, trackName).process()
def _convertTrackName(trackName):
    """Convert an 'a:b:c' style track name string into list format,
    URL-unquoting the components (per doUnquoting=True)."""
    from gtrackcore.util.CommonFunctions import convertTNstrToTNListFormat
    return convertTNstrToTNListFormat(trackName, doUnquoting=True)
def _trackNameExists(genome, trackName):
    """Return True if trackName is a valid track of genome; otherwise print
    a warning and return False."""
    from gtrackcore.track.hierarchy.ProcTrackOptions import ProcTrackOptions
    if not ProcTrackOptions.isValidTrack(genome, trackName):
        # print as a function: single-argument form behaves identically on
        # Python 2 and additionally makes the module importable on Python 3.
        print('Track "%s" of genome "%s" is not valid.' % (':'.join(trackName), genome))
        return False
    return True
def _getDirPath(genome=''):
    """Return the root data directory, creating it if necessary.

    NOTE(review): the genome argument is currently unused -- callers pass it,
    but the returned path is always the root directory. Confirm whether a
    per-genome path was intended.
    """
    from gtrackcore.util.CommonFunctions import createDirPath, ensurePathExists
    dirPath = createDirPath([], '')
    ensurePathExists(dirPath)
    return dirPath
def listAvailableGenomes():
    ""
    # (The empty docstring above doubles as this command's CLI argument spec
    # printed by _usage -- the command takes no arguments.)
    print('List of available genomes:')
    dirPath = _getDirPath()
    # 'name' instead of 'dir' to avoid shadowing the builtin; skip hidden
    # entries such as '.DS_Store'.
    for name in os.listdir(dirPath):
        if name[0] != '.':
            print('   ' + name)
def listAvailableTracks(genome):
    "genome"
    # (The docstring above doubles as this command's CLI argument spec.)
    print('List of available tracks for genome "%s":' % genome)
    # Called for its side effect of ensuring the data directory exists.
    _getDirPath(genome)
    from gtrackcore.track.hierarchy.ProcTrackNameSource import ProcTrackNameSource
    for trackName in ProcTrackNameSource(genome):
        # (a stray debug print of type(trackName) used to live here)
        print('   ' + ':'.join(trackName))
def getExtractionOptions(genome, trackName):
    """genome trackName"""
    trackName = _convertTrackName(trackName)
    if not _trackNameExists(genome, trackName):
        return
    # print('') emits a blank line on both Python 2 and Python 3, matching
    # the old bare 'print' statement (a bare 'print()' would print '()' on
    # Python 2).
    print('')
    print('Available extraction options for track "%s" of genome "%s":' % (':'.join(trackName), genome))
    print('')
    print('{:<19}'.format('fileFormatName') + '{:<17}'.format('allowOverlaps') + 'Description')
    print('-'*80)
    from gtrackcore.extract.TrackExtractor import TrackExtractor
    for text, suffix in TrackExtractor.getTrackExtractionOptions(genome, trackName):
        fileFormatName, asOriginal, allowOverlaps = TrackExtractor.getAttrsFromExtractionFormat(text)
        print('{:<19}'.format(fileFormatName) + '{:<17}'.format(str(allowOverlaps)) + text)
def _commonExportFile(outFileName, genome, trackName, fileFormatName, allowOverlaps, bins):
    """Shared implementation for exportFile/exportFileInRegion: extract one
    track over the given bins into a single output file."""
    trackName = _convertTrackName(trackName)
    if not _trackNameExists(genome, trackName):
        return
    outFileName = os.path.abspath(outFileName)
    from gtrackcore.extract.TrackExtractor import TrackExtractor
    TrackExtractor.extractOneTrackManyRegsToOneFile(trackName, bins, outFileName, fileFormatName=fileFormatName, \
                                                    globalCoords=True, asOriginal=False, \
                                                    allowOverlaps=allowOverlaps)
def exportFile(outFileName, genome, trackName, fileFormatName, allowOverlaps):
    """outFileName genome trackName fileFormatName allowOverlaps"""
    # Export the whole genome: '*' bins cover every chromosome (including
    # extra chromosomes).
    from gtrackcore.input.userbins.UserBinSource import UserBinSource
    bins = UserBinSource('*', '*', genome, includeExtraChrs=True)
    _commonExportFile(outFileName, genome, trackName, fileFormatName, allowOverlaps, bins)
def exportFileInRegion(outFileName, genome, trackName, fileFormatName, allowOverlaps, region):
    """outFileName genome trackName fileFormatName allowOverlaps region (e.g. chr21:1m-2m)"""
    # Same as exportFile, but restricted to a single user-specified region.
    from gtrackcore.input.userbins.UserBinSource import UserBinSource
    bins = UserBinSource(region, '*', genome, includeExtraChrs=True)
    _commonExportFile(outFileName, genome, trackName, fileFormatName, allowOverlaps, bins)
def _usage():
    """Print the list of available CLI commands and exit.

    funcList is the command table built in the __main__ block below.
    """
    print('syntax: ')
    print('to use: [name] [args]')
    print('available commands: ')
    print(', '.join(funcList.keys()))
    sys.exit(0)
if __name__ == "__main__":
    from collections import OrderedDict
    import types
    import sys

    thisModule = sys.modules[__name__]
    # Map every public top-level function (no leading underscore) to itself:
    # the function names double as CLI sub-commands, and their docstrings
    # serve as the per-command usage strings printed below.
    funcList = OrderedDict((a, thisModule.__dict__.get(a)) for a in sorted(dir(thisModule))
                           if isinstance(thisModule.__dict__.get(a), types.FunctionType) and a[0] != '_')

    if len(sys.argv) == 1:
        _usage()
    else:
        assert( len(sys.argv) >= 2)
        if not sys.argv[1] in funcList:
            _usage()
        else:
            try:
                func = funcList[sys.argv[1]]
                func(*sys.argv[2:])
            except:
                # Intentionally broad: print a usage hint for the failed
                # command, then re-raise the original exception unchanged.
                print('')
                print('usage: python Api.py ' + str(func.__name__) + ' ' + str(func.__doc__))
                print('')
                raise
| gpl-3.0 |
fengren/python_koans | python2/koans/about_proxy_object_project.py | 78 | 4194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Create a Proxy Class
#
# In this assignment, create a proxy class (one is started for you
# below). You should be able to initialize the proxy object with any
# object. Any attributes called on the proxy object should be forwarded
# to the target object. As each attribute call is sent, the proxy should
# record the name of the attribute sent.
#
# The proxy class is started for you. You will need to add a method
# missing handler and any other supporting methods. The specification
# of the Proxy class is given in the AboutProxyObjectProject koan.
# Note: This is a bit trickier than its Ruby Koans counterpart, but you
# can do it!
from runner.koan import *
class Proxy(object):
    """Exercise stub: forward attribute access to a wrapped object while
    recording which attribute names were requested.

    The koan class below is the full specification; the student is meant to
    fill in the missing methods (e.g. a __getattr__ handler).
    """
    def __init__(self, target_object):
        # WRITE CODE HERE

        #initialize '_obj' attribute last. Trust me on this!
        # (any __getattr__ based on self._obj would otherwise recurse while
        # earlier attributes are still being assigned in __init__)
        self._obj = target_object

    # WRITE CODE HERE
# The proxy object should pass the following Koan:
#
class AboutProxyObjectProject(Koan):
    """Specification for the Proxy exercise: each test pins one required
    behavior of the Proxy class the student has to implement."""

    def test_proxy_method_returns_wrapped_object(self):
        # NOTE: The Television class is defined below
        tv = Proxy(Television())

        self.assertTrue(isinstance(tv, Proxy))

    def test_tv_methods_still_perform_their_function(self):
        tv = Proxy(Television())

        tv.channel = 10
        tv.power()

        self.assertEqual(10, tv.channel)
        self.assertTrue(tv.is_on())

    def test_proxy_records_messages_sent_to_tv(self):
        tv = Proxy(Television())

        tv.power()
        tv.channel = 10

        self.assertEqual(['power', 'channel'], tv.messages())

    def test_proxy_handles_invalid_messages(self):
        tv = Proxy(Television())
        ex = None

        try:
            tv.no_such_method()
        except AttributeError as ex:
            pass

        # NOTE(review): this relies on Python 2 scoping, where 'except .. as
        # ex' leaves 'ex' bound after the block. On Python 3 the name is
        # deleted when the except block exits, so this line would raise
        # UnboundLocalError. Fine for this python2 koans tree.
        self.assertEqual(AttributeError, type(ex))

    def test_proxy_reports_methods_have_been_called(self):
        tv = Proxy(Television())

        tv.power()
        tv.power()

        self.assertTrue(tv.was_called('power'))
        self.assertFalse(tv.was_called('channel'))

    def test_proxy_counts_method_calls(self):
        tv = Proxy(Television())

        tv.power()
        tv.channel = 48
        tv.power()

        self.assertEqual(2, tv.number_of_times_called('power'))
        self.assertEqual(1, tv.number_of_times_called('channel'))
        self.assertEqual(0, tv.number_of_times_called('is_on'))

    def test_proxy_can_record_more_than_just_tv_objects(self):
        # The proxy must work for arbitrary targets, e.g. strings.
        proxy = Proxy("Py Ohio 2010")

        result = proxy.upper()

        self.assertEqual("PY OHIO 2010", result)

        result = proxy.split()

        self.assertEqual(["Py", "Ohio", "2010"], result)
        self.assertEqual(['upper', 'split'], proxy.messages())
# ====================================================================
# The following code is to support the testing of the Proxy class. No
# changes should be necessary to anything below this comment.
# Example class using in the proxy testing above.
class Television(object):
    """Minimal two-state TV used as the proxy target in the koans above."""

    def __init__(self):
        self._channel = None
        self._power = None

    @property
    def channel(self):
        """Currently selected channel (None until one is set)."""
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    def power(self):
        # Toggle between 'on' and 'off'. The very first press turns the set
        # on, because the initial state (None) is not 'on'.
        self._power = 'off' if self._power == 'on' else 'on'

    def is_on(self):
        """Return True while the set is powered on."""
        return self._power == 'on'
# Tests for the Television class. All of theses tests should pass.
class TelevisionTest(Koan):
    """Sanity tests for the Television support class; these must all pass
    as-is and are not part of the student's exercise."""

    def test_it_turns_on(self):
        tv = Television()

        tv.power()
        self.assertTrue(tv.is_on())

    def test_it_also_turns_off(self):
        tv = Television()

        tv.power()
        tv.power()

        self.assertFalse(tv.is_on())

    def test_edge_case_on_off(self):
        # Three presses end 'on', a fourth ends 'off' again.
        tv = Television()

        tv.power()
        tv.power()
        tv.power()

        self.assertTrue(tv.is_on())

        tv.power()

        self.assertFalse(tv.is_on())

    def test_can_set_the_channel(self):
        tv = Television()

        tv.channel = 11
        self.assertEqual(11, tv.channel)
| mit |
bcferrycoder/holideck | requests/packages/chardet2/sbcharsetprober.py | 25 | 4689 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
# Tuning constants for the single-byte prober below.
SAMPLE_SIZE = 64                      # size of the frequent-character sample range
SB_ENOUGH_REL_THRESHOLD = 1024        # min sequences before shortcut decisions
POSITIVE_SHORTCUT_THRESHOLD = 0.95    # confidence above which we declare a winner
NEGATIVE_SHORTCUT_THRESHOLD = 0.05    # confidence below which we give up
SYMBOL_CAT_ORDER = 250                # orders >= this are symbols, not letters
NUMBER_OF_SEQ_CAT = 4                 # number of sequence-likelihood categories
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1  # index of the most-likely category
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
    """Probe for a single-byte charset using a per-language model.

    The model maps each byte to a frequency "order"; consecutive pairs of
    orders are looked up in a precedence matrix and bucketed into
    NUMBER_OF_SEQ_CAT likelihood categories, from which a confidence score
    is derived in get_confidence().
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        self._mModel = model
        self._mReversed = reversed # TRUE if we need to reverse every pair in the model lookup
        self._mNameProber = nameProber # Optional auxiliary prober for name decision
        self.reset()

    def reset(self):
        """Reset all counters so the prober can be fed a fresh buffer."""
        CharSetProber.reset(self)
        self._mLastOrder = 255 # char order of last character
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        self._mFreqChar = 0 # characters that fall in our sampling range

    def get_charset_name(self):
        # Delegate when a name prober was supplied (used when one model
        # serves several charsets); otherwise report the model's own name.
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a chunk of input and update the sequence statistics.

        NOTE(review): aBuf is assumed to be a bytes object; on Python 3 the
        iteration yields ints, so charToOrderMap must be indexable by byte
        value -- confirm against the model definitions.
        """
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][c]
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Only pairs where both characters are in the sample range
                # contribute to the sequence statistics.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(self._mLastOrder * SAMPLE_SIZE) + order]] += 1
                    else: # reverse the order of the letters in the lookup
                        self._mSeqCounters[self._mModel['precedenceMatrix'][(order * SAMPLE_SIZE) + self._mLastOrder]] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a winner\n' % (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative shortcut threshhold %s\n' % (self._mModel['charsetName'], cf, NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Return the current confidence in [0.01, 0.99]."""
        r = 0.01
        if self._mTotalSeqs > 0:
            # print self._mSeqCounters[POSITIVE_CAT], self._mTotalSeqs, self._mModel['mTypicalPositiveRatio']
            # Ratio of most-likely sequences, normalised by the language's
            # typical positive ratio, then scaled by the fraction of
            # frequent characters seen.
            r = (1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs / self._mModel['mTypicalPositiveRatio']
            # print r, self._mFreqChar, self._mTotalChar
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
| mit |
google/rekall | setup.py | 1 | 4076 | #!/usr/bin/env python
# Rekall
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <scudette@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Meta-script for pulling in all Rekall components."""
from __future__ import print_function
__author__ = "Michael Cohen <scudette@gmail.com>"
import io
import os
import sys
import subprocess
from setuptools import setup
from setuptools.command.install import install as _install
from setuptools.command.develop import develop as _develop
import _version
VERSION = _version.get_versions()
rekall_description = "Rekall Memory Forensic Framework"
# This is a metapackage which pulls in the dependencies. There are two main
# installation scenarios:
# 1) We get installed from PyPi from our own sdist. In this case we need to
# declare dependencies on the released PyPi packages.
# 2) We get run from the root of the source tree (e.g. checked out from git). In
# this case we need to declare the setup.py as a dependency so it gets installed
# first.
class install(_install):
    """Install command that first installs rekall-core from a local source
    checkout (installation scenario 2 above) before the normal egg install."""

    def do_egg_install(self):
        path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "rekall-core", "setup.py"))
        if os.access(path, os.F_OK):
            print("Installing rekall-core from local directory.")
            subprocess.check_call([sys.executable, "setup.py", "install"],
                                  cwd="rekall-core")

        # Need to call this directly because _install.run does crazy stack
        # walking and falls back to compatibility mode.
        _install.do_egg_install(self)
class develop(_develop):
    """Develop command that first develop-installs rekall-core from a local
    source checkout (installation scenario 2 above)."""

    def run(self):
        path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), "rekall-core", "setup.py"))
        if os.access(path, os.F_OK):
            print("Installing rekall-core from local directory.")
            subprocess.check_call([sys.executable, "setup.py", "develop"],
                                  cwd="rekall-core")

        _develop.run(self)
def find_data_files(source):
    """Walk *source* and return (directory, [file paths]) pairs suitable
    for the data_files= argument of setup()."""
    return [(directory, [os.path.join(directory, name) for name in filenames])
            for directory, _, filenames in os.walk(source)]
# Wire the local-checkout aware commands defined above into setup().
commands = dict(
    install=install,
    develop=develop
)

# This requires an exact version to ensure that installing the meta package
# pulls in tested dependencies.
install_requires = [
    "rekall-agent >= 1.7.0rc1, < 1.8",
    "rekall-lib >= 1.7.0rc1, < 1.8",
    "rekall-core >= 1.7.0rc1, < 1.8",
    "ipython==6.5.0",
]

setup(
    name="rekall",
    version=VERSION["pep440"],
    cmdclass=commands,
    description=rekall_description,
    long_description=io.open("README.md", "rt", encoding='utf8').read(),
    license="GPL",
    url="https://www.rekall-forensic.com/",
    author="The Rekall team",
    author_email="rekall-discuss@googlegroups.com",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
    install_requires=install_requires,
    extras_require={
        # The following requirements are needed in Windows.
        ':sys_platform=="win32"': [
            "pyreadline >= 2.0",
        ],
        ':sys_platform!="win32"': [
            "readline",
        ],
    },
    data_files=find_data_files("tools"),
)
| gpl-2.0 |
Caylo/easybuild-framework | easybuild/toolchains/mpi/craympich.py | 2 | 3478 | ##
# Copyright 2014-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
MPI support for the Cray Programming Environment (craype).
:author: Petar Forai (IMP/IMBA, Austria)
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.craype import CrayPECompiler
from easybuild.toolchains.mpi.mpich import TC_CONSTANT_MPICH, TC_CONSTANT_MPI_TYPE_MPICH
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.toolchain.constants import COMPILER_VARIABLES, MPI_COMPILER_TEMPLATE, MPI_COMPILER_VARIABLES
from easybuild.tools.toolchain.constants import SEQ_COMPILER_TEMPLATE
from easybuild.tools.toolchain.mpi import Mpi
class CrayMPICH(Mpi):
    """Generic support for using Cray compiler wrappers"""
    # MPI support
    # no separate module, Cray compiler drivers always provide MPI support
    MPI_MODULE_NAME = []
    MPI_FAMILY = TC_CONSTANT_MPICH
    MPI_TYPE = TC_CONSTANT_MPI_TYPE_MPICH

    # The Cray compiler drivers double as the MPI compiler commands.
    MPI_COMPILER_MPICC = CrayPECompiler.COMPILER_CC
    MPI_COMPILER_MPICXX = CrayPECompiler.COMPILER_CXX

    MPI_COMPILER_MPIF77 = CrayPECompiler.COMPILER_F77
    MPI_COMPILER_MPIF90 = CrayPECompiler.COMPILER_F90
    MPI_COMPILER_MPIFC = CrayPECompiler.COMPILER_FC

    # no MPI wrappers, so no need to specify serial compiler
    MPI_SHARED_OPTION_MAP = dict([('_opt_%s' % var, '') for var, _ in MPI_COMPILER_VARIABLES])

    def _set_mpi_compiler_variables(self):
        """Set the MPI compiler variables"""
        for var_tuple in COMPILER_VARIABLES:
            c_var = var_tuple[0]  # [1] is the description
            var = MPI_COMPILER_TEMPLATE % {'c_var':c_var}

            value = getattr(self, 'MPI_COMPILER_%s' % var.upper(), None)
            if value is None:
                raise EasyBuildError("_set_mpi_compiler_variables: mpi compiler variable %s undefined", var)
            self.variables.nappend_el(var, value)

            # With usempi, also expose the plain (sequential) compiler under
            # its SEQ_* name before MPI wrappers take over the main variable.
            if self.options.get('usempi', None):
                var_seq = SEQ_COMPILER_TEMPLATE % {'c_var': c_var}
                seq_comp = self.variables[c_var]
                self.log.debug('_set_mpi_compiler_variables: usempi set: defining %s as %s', var_seq, seq_comp)
                self.variables[var_seq] = seq_comp

        if self.options.get('cciscxx', None):
            self.log.debug("_set_mpi_compiler_variables: cciscxx set: switching MPICXX %s for MPICC value %s" %
                           (self.variables['MPICXX'], self.variables['MPICC']))
            self.variables['MPICXX'] = self.variables['MPICC']
| gpl-2.0 |
luis-rr/saga-python | docs/concepts/exceptions.py | 5 | 5931 |
import sys
import traceback
# ------------------------------------------------------------------------------
#
class MyEx (Exception) :
    """Base demo exception that preserves the traceback and message chain of
    a 'parent' exception, whether that parent is another MyEx or a native
    Python exception.

    NOTE: the frame extracted via traceback.extract_stack()[-2] is the
    direct caller of this constructor, so this __init__ must not be wrapped
    in further helper functions.
    """

    # --------------------------------------------------------------------------
    #
    # the exception constructor always needs a message (no need complaining if
    # there is nothing to complain about), but can also get a parent exception,
    # which usually should indicate the error which triggered *this* exception.
    #
    def __init__ (self, msg, parent=None) :

        ptype = type(parent).__name__  # exception type for parent
        stype = type(self).__name__    # exception type for self, useful for
                                       # inherited exceptions

        # did we get a parent exception?
        if parent :

            # if so, then this exception is likely created in some 'except'
            # clause, as a reaction on a previously catched exception (the
            # parent).  Thus we append the message of the parent to our own
            # message, but keep the parent's traceback (after all, the original
            # exception location is what we are interested in).
            #
            if isinstance (parent, MyEx) :
                # that all works nicely when parent is our own exception type...
                self.traceback = parent.traceback

                frame          = traceback.extract_stack ()[-2]
                line           = "%s +%s (%s)  :  %s" % frame
                self.msg       = "  %-20s: %s (%s)\n%s" % (stype, msg, line, parent.msg)

            else :
                # ... but if parent is a native (or any other) exception type,
                # we don't have a traceback really -- so we dig it out of
                # sys.exc_info.
                trace          = sys.exc_info ()[2]
                stack          = traceback.extract_tb  (trace)
                traceback_list = traceback.format_list (stack)
                self.traceback = "".join (traceback_list)

                # the message composition is very similar -- we just inject the
                # parent exception type inconspicuously somewhere (above that
                # was part of 'parent.msg' already).
                frame          = traceback.extract_stack ()[-2]
                line           = "%s +%s (%s)  :  %s" % frame
                self.msg       = "  %-20s: %s (%s)\n  %-20s: %s" % (stype, msg, line, ptype, parent)

        else :
            # if we don't have a parent, we are a 1st principle exception,
            # i.e. a reaction to some genuine code error.  Thus we extract the
            # traceback from exactly where we are in the code (the last stack
            # frame will be the call to this exception constructor), and we
            # create the original exception message from 'stype' and 'msg'.
            stack          = traceback.extract_stack ()
            traceback_list = traceback.format_list (stack)
            self.traceback = "".join (traceback_list[:-1])
            self.msg       = "  %-20s: %s" % (stype, msg)

    # convenience method for string conversion -- simply returns message
    def __str__ (self) :
        return self.msg
# ------------------------------------------------------------------------------
#
# inherit a couple of exception types (they'll get the correct 'stype' above)
#
class MyEx_1 (MyEx) : pass   # demo exception type raised directly in code
class MyEx_2 (MyEx) : pass   # demo exception type raised in except clauses
class MyEx_3 (MyEx) : pass   # demo exception type used one call-level up
# ------------------------------------------------------------------------------
#
# This is the interesting part -- that call triggers a couple of exceptions.
# Run like this:
#
# 0) run as is
# 1) comment out #1 -- run again
# 2) comment out #2 -- run again
# 3) comment out #3 -- run again
#
# The four cases above are basically:
#
# 0) native exception in code
# 1) custom exception in code
# 2) native exception in try
# 3) custom exception in try
#
# Read on below for more cases though
#
def call_4 () :
    # Intentionally failing demo. Each numbered statement (#1..#3) is meant
    # to be commented out one by one per the experiment list above, so only
    # one failure path runs at a time.

    d = int ('test')                                   # 1

    e = MyEx_1 ("exception in code_4")
    raise e                                            # 2

    try :
        d = int ('test')                               # 3
        e = MyEx_1 ("exception in try_4")
        raise e

    except Exception as ex :
        # wrap whatever was caught, keeping its traceback via MyEx
        e = MyEx_2 ("exception in except_4", ex)
        raise e
# ------------------------------------------------------------------------------
#
# one level up in the call stack, we catch/convert exceptions from call_4
#
def call_3 () :
    # One level above call_4: catches anything from below and re-raises it
    # wrapped in MyEx_3 so the original traceback is preserved.

    # enable this if you want the exceptions to fall through to main.  You can
    # do this for all four cases above.
    # Note that main will only catch 'MyEx' typed exceptions.
    #
    # call_4 ()
    #

    try :
        call_4 ()

    except Exception as ex :
        e = MyEx_3 ("exception in except_3", ex)
        raise e
# ------------------------------------------------------------------------------
# make the call stack a little deeper, for fun
def call_2 () : call_3 ()   # pass-through frame to deepen the stack
def call_1 () : call_2 ()   # entry point used by the demo code below
# ------------------------------------------------------------------------------
# _________________________________
# / \
# | # # ## # # # |
# | ## ## # # # ## # |
# | # ## # # # # # # # |
# | # # ###### # # # # |
# | # # # # # # ## |
# | # # # # # # # |
# \ /
# ---------------------------------
# \ ^__^
# \ (oo)\_______
# (__)\ )\/\
# ||----w |
# || ||
#
# enable this if you want to see uncatched exceptions -- you can do this for all
# eight cases above.
#
# call_1 ()
#
try :
    call_1 ()

except MyEx as e :
    # print as functions: with a single argument the output is identical on
    # Python 2, and the demo also runs under Python 3.
    print("==================================")
    print(e)
    print("==================================")
    print(e.traceback)
    print("==================================")
| mit |
a-doumoulakis/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/sigmoid.py | 85 | 1141 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sigmoid bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.distributions.python.ops.bijectors.sigmoid_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ["Sigmoid"]
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
bruab/reddit_sna | src/compare_subreddits.py | 1 | 4903 | #!/usr/bin/env python3
import sys
import praw
from pprint import pprint
def get_all_redditors_from_a_sub(praw_handle, sub, num_comments=None):
    """Return the authors of the last num_comments comments submitted to sub.

    :param praw_handle: an authenticated praw.Reddit instance
    :param sub: subreddit name
    :param num_comments: maximum number of comments to scan; None means
        "as many as the API will return"
    :return: list of unique redditors, in order of first appearance
    """
    all_redditors = []
    subreddit = praw_handle.get_subreddit(sub)
    # BUG FIX: a leftover "DEBUG SETTING" line used to force num_comments to
    # 50 here, silently ignoring the caller's argument.
    comments = subreddit.get_comments(limit=num_comments)
    for comment in comments:
        # Linear membership test keeps first-appearance order without
        # requiring the redditor objects to be hashable.
        if comment.author not in all_redditors:
            all_redditors.append(comment.author)
    return all_redditors
def main():
    """CLI entry point: report commenter overlap between two subreddits,
    then scan for cross-replies between members of the two groups."""
    if len(sys.argv) != 3:
        sys.stderr.write("usage: compare_subreddits.py <subreddit_1> <subreddit_2>\n")
        sys.exit()
    sub1 = sys.argv[1]
    sub2 = sys.argv[2]

    user_agent = ("reddit_sna scraper v0.1 by /u/sna_bot "
                  "https://github.com/brianreallymany/reddit_sna")
    r = praw.Reddit(user_agent=user_agent)

    group1 = get_all_redditors_from_a_sub(r, sub1)
    group2 = get_all_redditors_from_a_sub(r, sub2)

    # Overlap ratio O_r = |common users| / |smaller group|.
    common_users = [u for u in group1 if u in group2]
    denominator = min(len(group1), len(group2))
    numerator = len(common_users)
    # BUG FIX: guard against an empty subreddit; this used to raise
    # ZeroDivisionError when either group was empty.
    O_r = float(numerator) / denominator if denominator else 0.0
    print("users in " + sub1 + ": " + str(len(group1)))
    print("users in " + sub2 + ": " + str(len(group2)))
    print("users in common: " + str(len(common_users)))
    print("O_r is " + str(O_r))

    sys.stderr.write("checking comments for each user now\n\n")
    for user in group1:
        # TODO: leftover debug cap -- None would fetch as many comments as
        # the API allows.
        num_user_comments = 50
        sys.stderr.write("checking comments for user " + str(user) + "\n")
        for comment in user.get_comments(limit=num_user_comments):
            sys.stderr.write("\tcurrently inspecting this comment: " +
                             str(comment) + " ...from " +
                             str(comment.subreddit) + "\n")

            # Downstream: did anyone from sub2 reply to this sub1 user?
            sys.stderr.write("\tfound " + str(len(comment.replies)) + " replies...\n")
            for reply in comment.replies:
                sys.stderr.write("\t\tlooking at a reply by " + str(reply.author) + "\n")
                if reply.author in group2:
                    sys.stderr.write("found one!\n")
                    # BUG FIX: the report used to interpolate the entire user
                    # lists (str(group1)/str(group2)) instead of the
                    # subreddit names, and reversed who belonged where.
                    print(str(reply.author) + ", a user in " + sub2 +
                          ", replied to " + str(user) + ", a user in " + sub1 +
                          ", in this comment: " + str(reply) +
                          " ... in this subreddit: " + str(reply.subreddit))

            # Upstream: did this sub1 user reply (directly or transitively)
            # to someone from sub2?
            current_comment = comment
            while not current_comment.is_root:
                sys.stderr.write("\tthis comment has a parent; fetching it now.\n")
                current_comment = r.get_info(thing_id=current_comment.parent_id)
                sys.stderr.write("\tparent comment author is " +
                                 str(current_comment.author) + "\n")
                if current_comment.author in group2:
                    sys.stderr.write("found one!\n")
                    # BUG FIX: this branch used to print the stale 'reply'
                    # variable from the loop above instead of the ancestor
                    # comment actually matched here.
                    print(str(user) + ", a user in " + sub1 +
                          ", replied to " + str(current_comment.author) +
                          ", a user in " + sub2 +
                          ", in this comment: " + str(comment) +
                          " ... in this subreddit: " + str(comment.subreddit))
# and now we got the parent comment. check if it's opposite sub or not. and keep going
############################################################################
if __name__ == '__main__':
main()
| mit |
mmnelemane/neutron | neutron/tests/api/base_security_groups.py | 48 | 2176 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
class BaseSecGroupTest(base.BaseNetworkTest):
    """Shared helpers for security-group API tests."""

    @classmethod
    def resource_setup(cls):
        """Delegate shared resource setup to the network test base class."""
        super(BaseSecGroupTest, cls).resource_setup()

    def _create_security_group(self):
        """Create a randomly named security group and schedule its deletion.

        Returns a tuple of (create_response_body, group_name).
        """
        group_name = data_utils.rand_name('secgroup-')
        body = self.client.create_security_group(name=group_name)
        self.addCleanup(self._delete_security_group,
                        body['security_group']['id'])
        # The service must echo back the exact name that was requested.
        self.assertEqual(body['security_group']['name'], group_name)
        return body, group_name

    def _delete_security_group(self, secgroup_id):
        """Delete a security group and verify it is absent from listings."""
        self.client.delete_security_group(secgroup_id)
        listing = self.client.list_security_groups()
        remaining = [group['id'] for group in listing['security_groups']]
        self.assertNotIn(secgroup_id, remaining)

    def _delete_security_group_rule(self, rule_id):
        """Delete a security group rule and verify it is absent from listings."""
        self.client.delete_security_group_rule(rule_id)
        listing = self.client.list_security_group_rules()
        remaining = [rule['id'] for rule in listing['security_group_rules']]
        self.assertNotIn(rule_id, remaining)
| apache-2.0 |
jokerdino/unity-tweak-tool | UnityTweakTool/config/logging.py | 2 | 1498 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Team:
# J Phani Mahesh <phanimahesh@gmail.com>
# Barneedhar (jokerdino) <barneedhar@ubuntu.com>
# Amith KK <amithkumaran@gmail.com>
# Georgi Karavasilev <motorslav@gmail.com>
# Sam Tran <samvtran@gmail.com>
# Sam Hewitt <hewittsamuel@gmail.com>
# Angel Araya <al.arayaq@gmail.com>
#
# Description:
# A One-stop configuration tool for Unity.
#
# Legal Stuff:
#
# This file is a part of Unity Tweak Tool
#
# Unity Tweak Tool is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# Unity Tweak Tool is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/gpl-3.0.txt>
# This file should control the logging setup for entire application
import logging
import xdg.BaseDirectory
import os

# Module-level logger for this configuration module itself.
logger=logging.getLogger('UnityTweakTool.config.logging')

# This makes the directory if missing.
CACHEDIR = xdg.BaseDirectory.save_cache_path('unity-tweak-tool')
# Destination log file inside the per-user XDG cache directory.
LOGFILE = os.path.join(CACHEDIR,'debug.log')
# Record format: timestamp, level, logger name, function, message.
LOGFMT = '%(asctime)s - %(levelname)-8s :: %(name)s - %(funcName)s - %(message)s'
# NOTE(review): constants only — the handler/basicConfig wiring that
# consumes LOGFILE/LOGFMT/LOGLVL is not visible in this chunk; confirm
# it exists elsewhere.
LOGLVL = logging.DEBUG
| gpl-3.0 |
jorgebodega/PDL | library/test/yacc_simple.py | 174 | 1542 | # -----------------------------------------------------------------------------
# yacc_simple.py
#
# A simple, properly specified grammar
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules

# Operator precedence, lowest to highest. UMINUS is a fictitious token
# used only to give unary minus a higher precedence than binary ops.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
)

# dictionary of names: symbol table mapping NAME tokens to their values
names = { }
def p_statement_assign(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY —
    # do not reword it.
    'statement : NAME EQUALS expression'
    # Bind the evaluated expression to the name in the symbol table.
    names[t[1]] = t[3]
def p_statement_expr(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY —
    # do not reword it.
    'statement : expression'
    # A bare expression statement prints its value.
    print(t[1])
def p_expression_binop(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY —
    # do not reword it.
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Evaluate the binary operation immediately; t[2] is the operator
    # lexeme matched by the rule, t[1] and t[3] are the operand values.
    lhs = t[1]
    rhs = t[3]
    op = t[2]
    if op == '+':
        t[0] = lhs + rhs
    elif op == '-':
        t[0] = lhs - rhs
    elif op == '*':
        t[0] = lhs * rhs
    elif op == '/':
        t[0] = lhs / rhs
def p_expression_uminus(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY —
    # %prec UMINUS borrows the high precedence declared for the
    # fictitious UMINUS token.
    'expression : MINUS expression %prec UMINUS'
    t[0] = -t[2]
def p_expression_group(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY.
    'expression : LPAREN expression RPAREN'
    # Parentheses only group; the value passes through unchanged.
    t[0] = t[2]
def p_expression_number(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY.
    'expression : NUMBER'
    # The lexer already converted the token to a numeric value.
    t[0] = t[1]
def p_expression_name(t):
    # NOTE: the docstring below is the grammar rule consumed by PLY.
    'expression : NAME'
    # EAFP lookup: an undefined name reports an error and evaluates
    # to 0 so parsing can continue.
    try:
        t[0] = names[t[1]]
    except LookupError:
        print("Undefined name '%s'" % t[1])
        t[0] = 0
def p_error(t):
    # Called by yacc whenever a token cannot be shifted or reduced.
    print("Syntax error at '%s'" % t.value)
yacc.yacc()
| gpl-3.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/autobuilder/YoctoMailer.py | 3 | 4464 | '''
Created on June 28, 2013
__author__ = "Elizabeth 'pidge' Flanagan"
__copyright__ = "Copyright 2012-2013, Intel Corp."
__credits__ = ["Elizabeth Flanagan"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Elizabeth Flanagan"
__email__ = "elizabeth.flanagan@intel.com"
'''
from buildbot.status.mail import MailNotifier
from buildbot.status.mail import *
from buildbot import interfaces, util, config
from buildbot.process.users import users
from buildbot.status import base
from buildbot.status.results import FAILURE, SUCCESS, WARNINGS, EXCEPTION, Results
from twisted.python import log
class YoctoMailNotifier(MailNotifier):
    """MailNotifier variant that additionally filters builds by
    repository (``yoctorepos``) and branch before deciding whether
    notification mail is needed."""

    # NOTE(review): mode=("failing") is NOT a tuple — the parentheses are
    # redundant and the default is the string "failing". The membership
    # tests in isMailNeeded ("failing" in self.mode, etc.) work for both
    # a string and a sequence, so this happens to behave; confirm intent.
    # NOTE(review): branches=[], yoctorepos=[] and extraRecipients=[] are
    # mutable default arguments. They are only stored on self here, not
    # mutated in this class, but callers sharing the defaults across
    # instances would share the same list objects.
    def __init__(self, fromaddr, mode=("failing"),
                 categories=None, branches=[], yoctorepos=[], builders=None, addLogs=False,
                 relayhost="localhost", buildSetSummary=False,
                 subject="buildbot %(result)s in %(title)s on %(builder)s",
                 lookup=None, extraRecipients=[],
                 sendToInterestedUsers=True, customMesg=None,
                 messageFormatter=defaultMessage, extraHeaders=None,
                 addPatch=True, useTls=False,
                 smtpUser=None, smtpPassword=None, smtpPort=25):
        # Stash every argument on self, then forward all of them (except
        # branches/yoctorepos, which are specific to this subclass) to
        # the base MailNotifier initializer.
        self.fromaddr = fromaddr
        self.mode=mode
        self.categories = categories
        self.branches = branches
        self.yoctorepos = yoctorepos
        self.builders = builders
        self.addLogs = addLogs
        self.relayhost = relayhost
        self.buildSetSummary = buildSetSummary
        self.subject = subject
        self.lookup = lookup
        self.extraRecipients = extraRecipients
        self.sendToInterestedUsers = sendToInterestedUsers
        self.customMesg = customMesg
        self.messageFormatter = messageFormatter
        self.extraHeaders = extraHeaders
        self.addPatch = addPatch
        self.useTls = useTls
        self.smtpUser = smtpUser
        self.smtpPassword = smtpPassword
        self.smtpPort = smtpPort
        MailNotifier.__init__(self, fromaddr, mode=self.mode,
                              categories = self.categories, builders = self.builders,
                              addLogs = self.addLogs, relayhost = self.relayhost,
                              buildSetSummary = self.buildSetSummary,
                              subject = self.subject,
                              lookup = self.lookup,
                              extraRecipients = self.extraRecipients,
                              sendToInterestedUsers = self.sendToInterestedUsers,
                              customMesg = self.customMesg,
                              messageFormatter = self.messageFormatter,
                              extraHeaders = self.extraHeaders, addPatch = self.addPatch,
                              useTls = self.useTls, smtpUser = self.smtpUser,
                              smtpPassword = self.smtpPassword, smtpPort = self.smtpPort)

    def isMailNeeded(self, build, results):
        """Return True when a mail should be sent for *build*/*results*.

        First applies builder/category/repository/branch filters, then
        falls back to the standard mode-based result checks.
        """
        # here is where we actually do something.
        builder = build.getBuilder()
        repo=build.getProperty("repository")
        branch=build.getProperty("branch")
        log.msg(repo)
        log.msg(branch)
        buildme = False
        if self.builders is not None and builder.name not in self.builders:
            return False # ignore this build
        if self.categories is not None and \
           builder.category not in self.categories:
            return False # ignore this build
        # NOTE(review): with the constructor default yoctorepos=[], this
        # first branch rejects every build ([] is not None, and no repo
        # is "in" an empty list). Presumably callers always pass a
        # non-empty yoctorepos list — confirm against the master config.
        if self.yoctorepos is not None and repo not in self.yoctorepos:
            return False # ignore this build
        elif self.yoctorepos is None and self.branches is not None and branch in self.branches:
            return False
        elif self.yoctorepos is not None and repo in self.yoctorepos and self.branches is None:
            buildme = True
        else:
            buildme = True
        if buildme is True:
            # Standard MailNotifier-style mode checks against the
            # previous build's result.
            prev = build.getPreviousBuild()
            if "change" in self.mode:
                if prev and prev.getResults() != results:
                    return True
            if "failing" in self.mode and results == FAILURE:
                return True
            if "passing" in self.mode and results == SUCCESS:
                return True
            if "problem" in self.mode and results == FAILURE:
                if prev and prev.getResults() != FAILURE:
                    return True
            if "warnings" in self.mode and results == WARNINGS:
                return True
            if "exception" in self.mode and results == EXCEPTION:
                return True
        return False
| gpl-2.0 |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/site-packages/PIL/PcfFontFile.py | 72 | 6194 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id$
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
from PIL import FontFile
from PIL import _binary
# --------------------------------------------------------------------
# declarations

# File magic number (little-endian on disk).
PCF_MAGIC = 0x70636601  # "\x01fcp"

# Table-of-contents type tags: each bit identifies one PCF table kind.
PCF_PROPERTIES = (1 << 0)
PCF_ACCELERATORS = (1 << 1)
PCF_METRICS = (1 << 2)
PCF_BITMAPS = (1 << 3)
PCF_INK_METRICS = (1 << 4)
PCF_BDF_ENCODINGS = (1 << 5)
PCF_SWIDTHS = (1 << 6)
PCF_GLYPH_NAMES = (1 << 7)
PCF_BDF_ACCELERATORS = (1 << 8)

# Row-stride calculators indexed by the 2-bit pad field of a table's
# format word: rows padded to 1, 2, 4, or 8 bytes respectively.
BYTES_PER_ROW = [
    lambda bits: ((bits+7) >> 3),
    lambda bits: ((bits+15) >> 3) & ~1,
    lambda bits: ((bits+31) >> 3) & ~3,
    lambda bits: ((bits+63) >> 3) & ~7,
]

# Short aliases for the byte/int readers: i8 = unsigned byte,
# l* = little-endian, b* = big-endian 16/32-bit integers.
i8 = _binary.i8
l16 = _binary.i16le
l32 = _binary.i32le
b16 = _binary.i16be
b32 = _binary.i32be
def sz(s, o):
    """Return the NUL-terminated byte string starting at offset *o* in *s*."""
    terminator = s.index(b"\0", o)
    return s[o:terminator]
##
# Font file plugin for the X11 PCF format.

class PcfFontFile(FontFile.FontFile):
    """Reader for X11 Portable Compiled Format (PCF) bitmap fonts.

    Parses the table of contents, properties, metrics, bitmaps and
    encoding tables, then builds the 256-entry glyph list expected by
    FontFile. Only the first 256 code points (ISO-8859-1) are loaded.
    """

    name = "name"

    def __init__(self, fp):
        # The magic number is read little-endian from the file start.
        magic = l32(fp.read(4))
        if magic != PCF_MAGIC:
            raise SyntaxError("not a PCF file")

        FontFile.FontFile.__init__(self)

        # Table of contents: a count followed by (type, format, size,
        # offset) records; stored as toc[type] = (format, size, offset).
        count = l32(fp.read(4))
        self.toc = {}
        for i in range(count):
            type = l32(fp.read(4))
            self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))

        self.fp = fp

        self.info = self._load_properties()

        metrics = self._load_metrics()
        bitmaps = self._load_bitmaps(metrics)
        encoding = self._load_encoding()

        #
        # create glyph structure

        # For each encodable character, package (advance, offset bbox,
        # source bbox, bitmap) in the layout FontFile expects.
        for ch in range(256):
            ix = encoding[ch]
            if ix is not None:
                x, y, l, r, w, a, d, f = metrics[ix]
                glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix]
                self.glyph[ch] = glyph

    def _getformat(self, tag):
        """Seek to the table identified by *tag* and return
        (fp, format, i16, i32), where i16/i32 are the integer readers
        matching the table's byte order (format bit 2 => big-endian)."""
        format, size, offset = self.toc[tag]

        fp = self.fp
        fp.seek(offset)

        # The format word is repeated (little-endian) at the table start.
        format = l32(fp.read(4))

        if format & 4:
            i16, i32 = b16, b32
        else:
            i16, i32 = l16, l32

        return fp, format, i16, i32

    def _load_properties(self):
        """Read the PCF_PROPERTIES table into a dict of byte strings."""
        #
        # font properties

        properties = {}

        fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)

        nprops = i32(fp.read(4))

        # read property description
        # Each record is (name offset, is-string flag, value/offset).
        p = []
        for i in range(nprops):
            p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))))
        if nprops & 3:
            fp.seek(4 - (nprops & 3), 1) # pad

        # The string pool follows, prefixed by its length; names (and
        # string-valued properties) are NUL-terminated offsets into it.
        data = fp.read(i32(fp.read(4)))

        for k, s, v in p:
            k = sz(data, k)
            if s:
                v = sz(data, v)
            properties[k] = v

        return properties

    def _load_metrics(self):
        """Read the PCF_METRICS table.

        Returns a list of (xsize, ysize, left, right, width, ascent,
        descent, attributes) tuples, one per glyph.
        """
        #
        # font metrics

        metrics = []

        fp, format, i16, i32 = self._getformat(PCF_METRICS)

        append = metrics.append

        if (format & 0xff00) == 0x100:

            # "compressed" metrics: five unsigned bytes biased by 128,
            # and no attributes field (stored as 0).
            for i in range(i16(fp.read(2))):
                left = i8(fp.read(1)) - 128
                right = i8(fp.read(1)) - 128
                width = i8(fp.read(1)) - 128
                ascent = i8(fp.read(1)) - 128
                descent = i8(fp.read(1)) - 128

                xsize = right - left
                ysize = ascent + descent

                append(
                    (xsize, ysize, left, right, width,
                     ascent, descent, 0)
                    )

        else:

            # "jumbo" metrics: full 16-bit fields including attributes.
            for i in range(i32(fp.read(4))):
                left = i16(fp.read(2))
                right = i16(fp.read(2))
                width = i16(fp.read(2))
                ascent = i16(fp.read(2))
                descent = i16(fp.read(2))
                attributes = i16(fp.read(2))

                xsize = right - left
                ysize = ascent + descent

                append(
                    (xsize, ysize, left, right, width,
                     ascent, descent, attributes)
                    )

        return metrics

    def _load_bitmaps(self, metrics):
        """Read the PCF_BITMAPS table and decode one 1-bit image per
        glyph, using *metrics* for the per-glyph dimensions."""
        #
        # bitmap data

        bitmaps = []

        fp, format, i16, i32 = self._getformat(PCF_BITMAPS)

        nbitmaps = i32(fp.read(4))

        if nbitmaps != len(metrics):
            raise IOError("Wrong number of bitmaps")

        # Per-glyph byte offsets into the shared bitmap data block.
        offsets = []
        for i in range(nbitmaps):
            offsets.append(i32(fp.read(4)))

        # Total data size for each of the four possible row paddings.
        bitmapSizes = []
        for i in range(4):
            bitmapSizes.append(i32(fp.read(4)))

        # byteorder = format & 4 # non-zero => MSB
        bitorder = format & 8 # non-zero => MSB
        padindex = format & 3

        bitmapsize = bitmapSizes[padindex]
        offsets.append(bitmapsize)

        data = fp.read(bitmapsize)

        pad = BYTES_PER_ROW[padindex]
        # Raw mode "1;R" = LSB-first bits; plain "1" = MSB-first.
        mode = "1;R"
        if bitorder:
            mode = "1"

        # Slice each glyph's bytes out of the shared block; the
        # sentinel offset appended above closes the last slice.
        for i in range(nbitmaps):
            x, y, l, r, w, a, d, f = metrics[i]
            b, e = offsets[i], offsets[i+1]
            bitmaps.append(
                Image.frombytes("1", (x, y), data[b:e], "raw", mode, pad(x))
                )

        return bitmaps

    def _load_encoding(self):
        """Read PCF_BDF_ENCODINGS and return a 256-entry list mapping
        character code -> bitmap/metrics index (None if unmapped)."""
        # map character code to bitmap index
        encoding = [None] * 256

        fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)

        firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
        firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))

        default = i16(fp.read(2))

        nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)

        # 0xFFFF marks "no glyph" for a code point.
        for i in range(nencoding):
            encodingOffset = i16(fp.read(2))
            if encodingOffset != 0xFFFF:
                try:
                    encoding[i+firstCol] = encodingOffset
                except IndexError:
                    break # only load ISO-8859-1 glyphs

        return encoding
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.