repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
xq262144/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/__init__.py | 97 | 5361 | import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.utils.module_loading import import_by_path
from django.middleware.csrf import rotate_token
from .signals import user_logged_in, user_logged_out, user_login_failed
# Session keys under which the authenticated user's primary key and the
# dotted path of the backend that authenticated them are stored.
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
# Default name of the request parameter carrying the post-login redirect URL.
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
    """Import the authentication backend class at the dotted *path* and
    return a new instance of it."""
    backend_class = import_by_path(path)
    return backend_class()
def get_backends():
    """Instantiate every backend listed in settings.AUTHENTICATION_BACKENDS.

    Raises ImproperlyConfigured if the setting is empty.
    """
    backends = [load_backend(path) for path in settings.AUTHENTICATION_BACKENDS]
    if not backends:
        raise ImproperlyConfigured(
            'No authentication backends have been defined. '
            'Does AUTHENTICATION_BACKENDS contain anything?')
    return backends
def _clean_credentials(credentials):
"""
Cleans a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def authenticate(**credentials):
    """
    If the given credentials are valid, return a User object.

    Tries each configured backend in turn. Returns None either when a
    backend raises PermissionDenied (hard veto) or when no backend
    accepts the credentials, in which case user_login_failed is fired.
    """
    for backend in get_backends():
        try:
            candidate = backend.authenticate(**credentials)
        except TypeError:
            # This backend doesn't take these credential keywords; try the next one.
            continue
        except PermissionDenied:
            # Backend vetoed the login outright - do not consult the others.
            return None
        if candidate is None:
            continue
        # Annotate the user with the backend that produced it, so login()
        # can persist the backend path in the session.
        candidate.backend = "%s.%s" % (backend.__module__, backend.__class__.__name__)
        return candidate

    # The credentials supplied are invalid to all backends - fire the signal
    # with sensitive values masked.
    user_login_failed.send(sender=__name__,
                           credentials=_clean_credentials(credentials))
def login(request, user):
    """
    Persist a user id and a backend in the request. This way a user doesn't
    have to reauthenticate on every request. Note that data set during
    the anonymous session is retained when the user logs in.
    """
    if user is None:
        # No user supplied - persist whoever is already on the request.
        user = request.user
    # TODO: It would be nice to support different login methods, like signed cookies.
    if SESSION_KEY in request.session:
        if request.session[SESSION_KEY] != user.pk:
            # To avoid reusing another user's session, create a new, empty
            # session if the existing session corresponds to a different
            # authenticated user.
            request.session.flush()
    else:
        # Anonymous session: keep its data but issue a fresh session key.
        request.session.cycle_key()
    request.session[SESSION_KEY] = user.pk
    request.session[BACKEND_SESSION_KEY] = user.backend
    if hasattr(request, 'user'):
        request.user = user
    # Give the newly authenticated session a fresh CSRF token.
    rotate_token(request)
    user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
    """
    Removes the authenticated user's ID from the request and flushes their
    session data.
    """
    # Dispatch the signal before the user is logged out so the receivers have a
    # chance to find out *who* logged out.
    user = getattr(request, 'user', None)
    if hasattr(user, 'is_authenticated') and not user.is_authenticated():
        user = None
    user_logged_out.send(sender=user.__class__, request=request, user=user)

    # remember language choice saved to session
    language = request.session.get('django_language')
    request.session.flush()
    if language is not None:
        # restore the language choice so the user keeps their locale after logout
        request.session['django_language'] = language

    if hasattr(request, 'user'):
        from django.contrib.auth.models import AnonymousUser
        request.user = AnonymousUser()
def get_user_model():
    """
    Returns the User model that is active in this project.

    Raises ImproperlyConfigured when settings.AUTH_USER_MODEL is malformed
    or names a model that is not installed.
    """
    from django.db.models import get_model

    model_path = settings.AUTH_USER_MODEL
    parts = model_path.split('.')
    if len(parts) != 2:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL must be of the form 'app_label.model_name'")
    user_model = get_model(parts[0], parts[1])
    if user_model is None:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL refers to model '%s' that has not been installed"
            % settings.AUTH_USER_MODEL)
    return user_model
def get_user(request):
    """
    Returns the user model instance associated with the given request session.
    If no user is retrieved an instance of `AnonymousUser` is returned.
    """
    from .models import AnonymousUser

    try:
        session_uid = request.session[SESSION_KEY]
        backend_path = request.session[BACKEND_SESSION_KEY]
        # Refuse to honour a backend that is no longer configured.
        assert backend_path in settings.AUTHENTICATION_BACKENDS
        backend = load_backend(backend_path)
        found = backend.get_user(session_uid) or AnonymousUser()
    except (KeyError, AssertionError):
        found = AnonymousUser()
    return found
def get_permission_codename(action, opts):
    """
    Returns the codename of the permission for the specified action.

    E.g. action "add" for a model named "book" yields "add_book".
    """
    return '{0}_{1}'.format(action, opts.model_name)
| apache-2.0 |
aliyun/oss-ftp | python27/win32/Lib/idlelib/TreeWidget.py | 20 | 15063 | # XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - key bindings (instead of quick-n-dirty bindings on Canvas):
# - up/down arrow keys to move focus around
# - ditto for page up/down, home/end
# - left/right arrows to expand/collapse & move out/in
# - more doc strings
# - add icons for "file", "module", "class", "method"; better "python" icon
# - callback for selection???
# - multiple-item selection
# - tooltips
# - redo geometry without magic numbers
# - keep track of object ids to allow more careful cleaning
# - optimize tree redraw after expand of subnode
import os
from Tkinter import *
import imp
from idlelib import ZoomHeight
from idlelib.configHandler import idleConf
ICONDIR = "Icons"

# Look for Icons subdirectory in the same directory as this module
try:
    _icondir = os.path.join(os.path.dirname(__file__), ICONDIR)
except NameError:
    # __file__ is undefined in some embedding scenarios; fall back to a
    # cwd-relative "Icons" directory.
    _icondir = ICONDIR
if os.path.isdir(_icondir):
    ICONDIR = _icondir
elif not os.path.isdir(ICONDIR):
    raise RuntimeError, "can't find icon directory (%r)" % (ICONDIR,)
def listicons(icondir=ICONDIR):
    """Utility to display the available icons."""
    root = Tk()
    import glob
    paths = sorted(glob.glob(os.path.join(icondir, "*.gif")))
    images = []
    row = column = 0
    for path in paths:
        name = os.path.splitext(os.path.basename(path))[0]
        image = PhotoImage(file=path, master=root)
        images.append(image)
        # Icon above, name below, ten columns per row.
        Label(root, image=image, bd=1, relief="raised").grid(
            row=row, column=column)
        Label(root, text=name).grid(row=row + 1, column=column)
        column += 1
        if column >= 10:
            row += 2
            column = 0
    # Keep references on the root so Tk doesn't discard the images.
    root.images = images
class TreeNode:
    """A visual node of the tree, drawn on a Canvas; wraps a TreeItem."""

    def __init__(self, canvas, parent, item):
        self.canvas = canvas
        self.parent = parent        # parent TreeNode, or None for the root
        self.item = item            # the TreeItem this node displays
        self.state = 'collapsed'
        self.selected = False
        self.children = []
        self.x = self.y = None      # canvas position; set by draw()
        self.iconimages = {} # cache of PhotoImage instances for icons

    def destroy(self):
        # Recursively destroy the subtree and break the parent link so it
        # can be garbage-collected.
        for c in self.children[:]:
            self.children.remove(c)
            c.destroy()
        self.parent = None

    def geticonimage(self, name):
        # Return (and cache) the PhotoImage for the named icon; "name" may
        # omit the extension, in which case ".gif" is assumed.
        try:
            return self.iconimages[name]
        except KeyError:
            pass
        file, ext = os.path.splitext(name)
        ext = ext or ".gif"
        fullname = os.path.join(ICONDIR, file + ext)
        image = PhotoImage(master=self.canvas, file=fullname)
        self.iconimages[name] = image
        return image

    def select(self, event=None):
        # Make this the single selected node and redraw it highlighted.
        if self.selected:
            return
        self.deselectall()
        self.selected = True
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselect(self, event=None):
        if not self.selected:
            return
        self.selected = False
        self.canvas.delete(self.image_id)
        self.drawicon()
        self.drawtext()

    def deselectall(self):
        # Deselect every node in the tree, starting from the root.
        if self.parent:
            self.parent.deselectall()
        else:
            self.deselecttree()

    def deselecttree(self):
        # Deselect this node and all of its descendants.
        if self.selected:
            self.deselect()
        for child in self.children:
            child.deselecttree()

    def flip(self, event=None):
        # Toggle expanded/collapsed state; bound to double-click.
        if self.state == 'expanded':
            self.collapse()
        else:
            self.expand()
        self.item.OnDoubleClick()
        return "break"

    def expand(self, event=None):
        if not self.item._IsExpandable():
            return
        if self.state != 'expanded':
            self.state = 'expanded'
            self.update()
            self.view()

    def collapse(self, event=None):
        if self.state != 'collapsed':
            self.state = 'collapsed'
            self.update()

    def view(self):
        # Scroll the canvas so this node and its visible children are shown.
        top = self.y - 2
        bottom = self.lastvisiblechild().y + 17
        height = bottom - top
        visible_top = self.canvas.canvasy(0)
        visible_height = self.canvas.winfo_height()
        visible_bottom = self.canvas.canvasy(visible_height)
        if visible_top <= top and bottom <= visible_bottom:
            # Already fully visible; nothing to do.
            return
        x0, y0, x1, y1 = self.canvas._getints(self.canvas['scrollregion'])
        if top >= visible_top and height <= visible_height:
            fraction = top + height - visible_height
        else:
            fraction = top
        fraction = float(fraction) / y1
        self.canvas.yview_moveto(fraction)

    def lastvisiblechild(self):
        # Deepest last child along the expanded spine, or self.
        if self.children and self.state == 'expanded':
            return self.children[-1].lastvisiblechild()
        else:
            return self

    def update(self):
        # Redraw is always delegated to the root node, which repaints the
        # whole canvas.
        if self.parent:
            self.parent.update()
        else:
            oldcursor = self.canvas['cursor']
            self.canvas['cursor'] = "watch"
            self.canvas.update()
            self.canvas.delete(ALL)     # XXX could be more subtle
            self.draw(7, 2)
            x0, y0, x1, y1 = self.canvas.bbox(ALL)
            self.canvas.configure(scrollregion=(0, 0, x1, y1))
            self.canvas['cursor'] = oldcursor

    def draw(self, x, y):
        # Draw this node at (x, y) and, recursively, its children below it.
        # Returns the y coordinate just past the drawn subtree.
        # XXX This hard-codes too many geometry constants!
        dy = 20
        self.x, self.y = x, y
        self.drawicon()
        self.drawtext()
        if self.state != 'expanded':
            return y + dy
        # draw children
        if not self.children:
            sublist = self.item._GetSubList()
            if not sublist:
                # _IsExpandable() was mistaken; that's allowed
                return y+17
            for item in sublist:
                child = self.__class__(self.canvas, self, item)
                self.children.append(child)
        cx = x+20
        cy = y + dy
        cylast = 0
        for child in self.children:
            cylast = cy
            # horizontal connector from this node to the child
            self.canvas.create_line(x+9, cy+7, cx, cy+7, fill="gray50")
            cy = child.draw(cx, cy)
            if child.item._IsExpandable():
                if child.state == 'expanded':
                    iconname = "minusnode"
                    callback = child.collapse
                else:
                    iconname = "plusnode"
                    callback = child.expand
                image = self.geticonimage(iconname)
                id = self.canvas.create_image(x+9, cylast+7, image=image)
                # XXX This leaks bindings until canvas is deleted:
                self.canvas.tag_bind(id, "<1>", callback)
                self.canvas.tag_bind(id, "<Double-1>", lambda x: None)
        # vertical connector spanning all children
        id = self.canvas.create_line(x+9, y+10, x+9, cylast+7,
            ##stipple="gray50", # XXX Seems broken in Tk 8.0.x
            fill="gray50")
        self.canvas.tag_lower(id)  # XXX .lower(id) before Python 1.5.2
        return cy

    def drawicon(self):
        if self.selected:
            imagename = (self.item.GetSelectedIconName() or
                         self.item.GetIconName() or
                         "openfolder")
        else:
            imagename = self.item.GetIconName() or "folder"
        image = self.geticonimage(imagename)
        id = self.canvas.create_image(self.x, self.y, anchor="nw", image=image)
        self.image_id = id
        self.canvas.tag_bind(id, "<1>", self.select)
        self.canvas.tag_bind(id, "<Double-1>", self.flip)

    def drawtext(self):
        # Draw the optional label text plus the main text (a Label widget
        # embedded in the canvas so it can be swapped for an Entry on edit).
        textx = self.x+20-1
        texty = self.y-4
        labeltext = self.item.GetLabelText()
        if labeltext:
            id = self.canvas.create_text(textx, texty, anchor="nw",
                                         text=labeltext)
            self.canvas.tag_bind(id, "<1>", self.select)
            self.canvas.tag_bind(id, "<Double-1>", self.flip)
            x0, y0, x1, y1 = self.canvas.bbox(id)
            textx = max(x1, 200) + 10
        text = self.item.GetText() or "<no text>"
        try:
            self.entry
        except AttributeError:
            pass
        else:
            # An in-place edit is in progress; commit it first.
            self.edit_finish()
        try:
            label = self.label
        except AttributeError:
            # padding carefully selected (on Windows) to match Entry widget:
            self.label = Label(self.canvas, text=text, bd=0, padx=2, pady=2)
        theme = idleConf.GetOption('main','Theme','name')
        if self.selected:
            self.label.configure(idleConf.GetHighlight(theme, 'hilite'))
        else:
            self.label.configure(idleConf.GetHighlight(theme, 'normal'))
        id = self.canvas.create_window(textx, texty,
                                       anchor="nw", window=self.label)
        self.label.bind("<1>", self.select_or_edit)
        self.label.bind("<Double-1>", self.flip)
        self.text_id = id

    def select_or_edit(self, event=None):
        # Clicking an already-selected, editable item starts an edit.
        if self.selected and self.item.IsEditable():
            self.edit(event)
        else:
            self.select(event)

    def edit(self, event=None):
        # Overlay an Entry on top of the label for in-place renaming.
        self.entry = Entry(self.label, bd=0, highlightthickness=1, width=0)
        self.entry.insert(0, self.label['text'])
        self.entry.selection_range(0, END)
        self.entry.pack(ipadx=5)
        self.entry.focus_set()
        self.entry.bind("<Return>", self.edit_finish)
        self.entry.bind("<Escape>", self.edit_cancel)

    def edit_finish(self, event=None):
        # Commit the in-place edit (no-op if no edit is in progress).
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        text = entry.get()
        entry.destroy()
        if text and text != self.item.GetText():
            self.item.SetText(text)
        # Re-read the text: SetText may have refused or normalized it.
        text = self.item.GetText()
        self.label['text'] = text
        self.drawtext()
        self.canvas.focus_set()

    def edit_cancel(self, event=None):
        # Abandon the in-place edit (no-op if no edit is in progress).
        try:
            entry = self.entry
            del self.entry
        except AttributeError:
            return
        entry.destroy()
        self.drawtext()
        self.canvas.focus_set()
class TreeItem:
    """Abstract class representing tree items.

    Methods should typically be overridden, otherwise a default action
    is used.
    """

    def __init__(self):
        """Constructor. Do whatever you need to do."""

    def GetText(self):
        """Return text string to display."""

    def GetLabelText(self):
        """Return label text string to display in front of text (if any)."""

    # Cached result of IsExpandable(); None means "not yet computed".
    expandable = None

    def _IsExpandable(self):
        """Do not override! Called by TreeNode."""
        if self.expandable is None:
            self.expandable = self.IsExpandable()
        return self.expandable

    def IsExpandable(self):
        """Return whether there are subitems."""
        return 1

    def _GetSubList(self):
        """Do not override! Called by TreeNode."""
        if not self.IsExpandable():
            return []
        sublist = self.GetSubList()
        if not sublist:
            # IsExpandable() was optimistic; remember the truth for next time.
            self.expandable = 0
        return sublist

    def IsEditable(self):
        """Return whether the item's text may be edited."""

    def SetText(self, text):
        """Change the item's text (if it is editable)."""

    def GetIconName(self):
        """Return name of icon to be displayed normally."""

    def GetSelectedIconName(self):
        """Return name of icon to be displayed when selected."""

    def GetSubList(self):
        """Return list of items forming sublist."""

    def OnDoubleClick(self):
        """Called on a double-click on the item."""
# Example application
class FileTreeItem(TreeItem):
    """Example TreeItem subclass -- browse the file system."""

    def __init__(self, path):
        self.path = path

    def GetText(self):
        # Show the basename; for a root like "/" that is empty, so show
        # the full path instead.
        return os.path.basename(self.path) or self.path

    def IsEditable(self):
        # Only items with a basename (i.e. not filesystem roots) can be renamed.
        return os.path.basename(self.path) != ""

    def SetText(self, text):
        # Rename within the same directory only; silently ignore failures.
        directory = os.path.dirname(self.path)
        target = os.path.join(directory, text)
        if os.path.dirname(target) != directory:
            return
        try:
            os.rename(self.path, target)
        except os.error:
            pass
        else:
            self.path = target

    def GetIconName(self):
        if not self.IsExpandable():
            return "python"  # XXX wish there was a "file" icon

    def IsExpandable(self):
        return os.path.isdir(self.path)

    def GetSubList(self):
        # One child item per directory entry, sorted case-insensitively.
        try:
            entries = os.listdir(self.path)
        except os.error:
            return []
        entries.sort(key=os.path.normcase)
        return [FileTreeItem(os.path.join(self.path, entry))
                for entry in entries]
# A canvas widget with scroll bars and some useful bindings
class ScrolledCanvas:
    """A Canvas inside a Frame, with scroll bars and useful key bindings."""

    def __init__(self, master, **opts):
        if 'yscrollincrement' not in opts:
            # scroll in 17-pixel steps per "unit" by default
            opts['yscrollincrement'] = 17
        self.master = master
        self.frame = Frame(master)
        self.frame.rowconfigure(0, weight=1)
        self.frame.columnconfigure(0, weight=1)
        self.canvas = Canvas(self.frame, **opts)
        self.canvas.grid(row=0, column=0, sticky="nsew")
        self.vbar = Scrollbar(self.frame, name="vbar")
        self.vbar.grid(row=0, column=1, sticky="nse")
        self.hbar = Scrollbar(self.frame, name="hbar", orient="horizontal")
        self.hbar.grid(row=1, column=0, sticky="ews")
        # cross-wire the scrollbars and the canvas
        self.canvas['yscrollcommand'] = self.vbar.set
        self.vbar['command'] = self.canvas.yview
        self.canvas['xscrollcommand'] = self.hbar.set
        self.hbar['command'] = self.canvas.xview
        # keyboard scrolling
        self.canvas.bind("<Key-Prior>", self.page_up)
        self.canvas.bind("<Key-Next>", self.page_down)
        self.canvas.bind("<Key-Up>", self.unit_up)
        self.canvas.bind("<Key-Down>", self.unit_down)
        #if isinstance(master, Toplevel) or isinstance(master, Tk):
        self.canvas.bind("<Alt-Key-2>", self.zoom_height)
        self.canvas.focus_set()

    def page_up(self, event):
        self.canvas.yview_scroll(-1, "page")
        return "break"

    def page_down(self, event):
        self.canvas.yview_scroll(1, "page")
        return "break"

    def unit_up(self, event):
        self.canvas.yview_scroll(-1, "unit")
        return "break"

    def unit_down(self, event):
        self.canvas.yview_scroll(1, "unit")
        return "break"

    def zoom_height(self, event):
        ZoomHeight.zoom_height(self.master)
        return "break"
def _tree_widget(parent):
    """Human test entry point: browse the cwd in a TreeWidget window."""
    # BUG FIX: this module never imports `re` at the top level, so the
    # re.split() call below raised NameError; import it locally here.
    import re

    root = Tk()
    root.title("Test TreeWidget")
    # Position the test window a little below its parent window.
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d"%(x, y + 150))
    sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
    sc.frame.pack(expand=1, fill="both", side=LEFT)
    item = FileTreeItem(os.getcwd())
    node = TreeNode(sc.canvas, None, item)
    node.expand()
    root.mainloop()
if __name__ == '__main__':
    # Run the manual (human-verified) test for this widget.
    from idlelib.idle_test.htest import run
    run(_tree_widget)
| mit |
goddardl/gaffer | python/GafferUI/PlugLayout.py | 1 | 22086 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
QtGui = GafferUI._qtImport( "QtGui" )
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "layout:index" controls ordering of plugs within the layout
# - "layout:section" places the plug in a named section of the layout
# - "divider" specifies whether or not a plug should be followed by a divider
# - "layout:widgetType" the class name for the widget type of a particular plug
# - "layout:activator" the name of an activator to control editability
#
# Per-parent metadata support :
#
# - layout:section:sectionName:summary" dynamic metadata entry returning a
# string to be used as a summary for the section.
# - layout:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
#
# Per-node metadata support :
#
# - "layout:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "layout:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "layout:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "layout:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "layout:customWidget:Name:*" as for the standard per-plug "layout:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
class PlugLayout( GafferUI.Widget ) :
def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, **kw ) :
assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )
self.__layout = _TabLayout( orientation ) if isinstance( parent, Gaffer.Node ) else _CollapsibleLayout( orientation )
GafferUI.Widget.__init__( self, self.__layout, **kw )
self.__parent = parent
self.__readOnly = False
# we need to connect to the childAdded/childRemoved signals on
# the parent so we can update the ui when plugs are added and removed.
self.__childAddedConnection = parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
self.__childRemovedConnection = parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ) )
# since our layout is driven by metadata, we must respond dynamically
# to changes in that metadata.
self.__metadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ) )
# and since our activations are driven by plug values, we must respond
# when the plugs are dirtied.
self.__plugDirtiedConnection = self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ) )
# frequently events that trigger a ui update come in batches, so we
# perform the update lazily using a LazyMethod. the dirty variables
# keep track of the work we'll need to do in the update.
self.__layoutDirty = True
self.__activationsDirty = True
self.__summariesDirty = True
# mapping from layout item to widget, where the key is either a plug or
# the name of a custom widget (as returned by layoutOrder()).
self.__widgets = {}
self.__rootSection = _Section( self.__parent )
# schedule our first update, which will take place when we become
# visible for the first time.
self.__updateLazily()
def getReadOnly( self ) :
return self.__readOnly
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
self.__readOnly = readOnly
if self.__readOnly :
for widget in self.__widgets.values() :
self.__applyReadOnly( widget, self.__readOnly )
else :
self.__updateActivations()
## Returns a PlugValueWidget representing the specified child plug.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def plugValueWidget( self, childPlug, lazy=True ) :
if not lazy and len( self.__widgets ) == 0 :
self.__update()
w = self.__widgets.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
## Returns the custom widget registered with the specified name.
# Because the layout is built lazily on demand, this might return None due
# to the user not having opened up the ui - in this case lazy=False may
# be passed to force the creation of the ui.
def customWidget( self, name, lazy=True ) :
if not lazy and len( self.__widgets ) == 0 :
self.__update()
return self.__widgets.get( name )
## Returns the list of section names that will be used when laying
# out the plugs of the specified parent. The sections are returned
# in the order in which they will be created.
@classmethod
def layoutSections( cls, parent, includeCustomWidgets = False ) :
d = collections.OrderedDict()
for item in cls.layoutOrder( parent, includeCustomWidgets ) :
sectionPath = cls.__staticSectionPath(item, parent)
sectionName = ".".join( sectionPath )
d[sectionName] = 1
return d.keys()
## Returns the child plugs of the parent in the order in which they
# will be laid out, based on "layout:index" Metadata entries. If
# includeCustomWidgets is True, then the positions of custom widgets
# are represented by the appearance of the names of the widgets as
# strings within the list. If a section name is specified, then the
# result will be filtered to include only items in that section.
@classmethod
def layoutOrder( cls, parent, includeCustomWidgets = False, section = None ) :
items = parent.children( Gaffer.Plug )
items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]
if includeCustomWidgets :
if isinstance( parent, Gaffer.Node ) :
metadataNames = Gaffer.Metadata.registeredNodeValues( parent )
else :
metadataNames = Gaffer.Metadata.registeredPlugValues( parent )
for name in metadataNames :
m = re.match( "layout:customWidget:(.+):widgetType", name )
if m :
items.append( m.group( 1 ) )
itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
for itemAndIndex in itemsAndIndices :
index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent )
if index is not None :
index = index if index >= 0 else sys.maxint + index
itemAndIndex[0] = index
itemsAndIndices.sort( key = lambda x : x[0] )
if section is not None :
sectionPath = section.split( "." ) if section else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent ) == sectionPath ]
return [ x[1] for x in itemsAndIndices ]
@GafferUI.LazyMethod()
def __updateLazily( self ) :
self.__update()
def __update( self ) :
if self.__layoutDirty :
self.__updateLayout()
self.__layoutDirty = False
if self.__activationsDirty :
self.__updateActivations()
self.__activationsDirty = False
if self.__summariesDirty :
self.__updateSummariesWalk( self.__rootSection )
self.__summariesDirty = False
# delegate to our layout class to create a concrete
# layout from the section definitions.
self.__layout.update( self.__rootSection )
def __updateLayout( self ) :
# get the items to lay out - these are a combination
# of plugs and strings representing custom widgets.
items = self.layoutOrder( self.__parent, includeCustomWidgets = True )
# ditch widgets we don't need any more
itemsSet = set( items )
self.__widgets = { k : v for k, v in self.__widgets.items() if k in itemsSet }
# make (or reuse existing) widgets for each item, and sort them into
# sections.
self.__rootSection.clear()
for item in items :
if item not in self.__widgets :
if isinstance( item, Gaffer.Plug ) :
widget = self.__createPlugWidget( item )
else :
widget = self.__createCustomWidget( item )
self.__widgets[item] = widget
else :
widget = self.__widgets[item]
if widget is None :
continue
section = self.__rootSection
for sectionName in self.__sectionPath( item ) :
section = section.subsection( sectionName )
section.widgets.append( widget )
if self.__itemMetadataValue( item, "divider" ) :
section.widgets.append( GafferUI.Divider(
GafferUI.Divider.Orientation.Horizontal if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Vertical else GafferUI.Divider.Orientation.Vertical
) )
def __updateActivations( self ) :
if self.getReadOnly() :
return
activators = Gaffer.Metadata.nodeValue( self.__node(), "layout:activators" ) or {}
activators = { k : v.value for k, v in activators.items() } # convert CompoundData of BoolData to dict of booleans
for item, widget in self.__widgets.items() :
active = True
activatorName = self.__itemMetadataValue( item, "activator" )
if activatorName :
active = activators.get( activatorName )
if active is None :
active = Gaffer.Metadata.nodeValue( self.__node(), "layout:activator:" + activatorName )
active = active if active is not None else False
activators[activatorName] = active
self.__applyReadOnly( widget, not active )
def __updateSummariesWalk( self, section ) :
section.summary = self.__metadataValue( self.__parent, "layout:section:" + section.fullName + ":summary" ) or ""
for subsection in section.subsections.values() :
self.__updateSummariesWalk( subsection )
def __import( self, path ) :
path = path.split( "." )
result = __import__( path[0] )
for n in path[1:] :
result = getattr( result, n )
return result
def __createPlugWidget( self, plug ) :
widgetType = Gaffer.Metadata.plugValue( plug, "layout:widgetType" )
if widgetType is not None :
if widgetType == "None" :
return None
else :
widgetClass = self.__import( widgetType )
result = widgetClass( plug )
else :
result = GafferUI.PlugValueWidget.create( plug )
if result is None :
return result
if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() and Gaffer.Metadata.plugValue( plug, "label" ) != "" :
result = GafferUI.PlugWidget( result )
if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Horizontal :
# undo the annoying fixed size the PlugWidget has applied
# to the label.
## \todo Shift all the label size fixing out of PlugWidget and just fix the
# widget here if we're in a vertical orientation.
QWIDGETSIZE_MAX = 16777215 # qt #define not exposed by PyQt or PySide
result.labelPlugValueWidget().label()._qtWidget().setFixedWidth( QWIDGETSIZE_MAX )
self.__applyReadOnly( result, self.getReadOnly() )
return result
def __createCustomWidget( self, name ) :
widgetType = self.__itemMetadataValue( name, "widgetType" )
widgetClass = self.__import( widgetType )
return widgetClass( self.__parent )
def __node( self ) :
return self.__parent if isinstance( self.__parent, Gaffer.Node ) else self.__parent.node()
@classmethod
def __metadataValue( cls, plugOrNode, name ) :
if isinstance( plugOrNode, Gaffer.Node ) :
return Gaffer.Metadata.nodeValue( plugOrNode, name )
else :
return Gaffer.Metadata.plugValue( plugOrNode, name )
@classmethod
def __staticItemMetadataValue( cls, item, name, parent ) :
if isinstance( item, Gaffer.Plug ) :
##\todo Update "divider" and "label" items to use prefix too
if name not in ( "divider", "label" ) :
name = "layout:" + name
return Gaffer.Metadata.plugValue( item, name )
else :
return cls.__metadataValue( parent, "layout:customWidget:" + item + ":" + name )
def __itemMetadataValue( self, item, name ) :
return self.__staticItemMetadataValue( item, name, parent = self.__parent )
@classmethod
def __staticSectionPath( cls, item, parent ) :
m = None
if isinstance( parent, Gaffer.Node ) :
# Backwards compatibility with old metadata entry
## \todo Remove
m = cls.__staticItemMetadataValue( item, "nodeUI:section", parent )
if m == "header" :
m = ""
if m is None :
m = cls.__staticItemMetadataValue( item, "section", parent )
return m.split( "." ) if m else []
def __sectionPath( self, item ) :
return self.__staticSectionPath( item, parent = self.__parent )
def __childAddedOrRemoved( self, *unusedArgs ) :
# typically many children are added and removed at once, so
# we do a lazy update so we can batch up several changes into one.
# upheaval is over.
self.__layoutDirty = True
self.__updateLazily()
def __applyReadOnly( self, widget, readOnly ) :
if widget is None :
return
if hasattr( widget, "setReadOnly" ) :
widget.setReadOnly( readOnly )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setReadOnly( readOnly )
widget.plugValueWidget().setReadOnly( readOnly )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setReadOnly( readOnly )
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if not self.visible() :
return
if plug is not None and not self.__parent.isSame( plug.parent() ) :
return
if not self.__node().isInstanceOf( nodeTypeId ) :
return
if key in ( "divider", "layout:index", "layout:section" ) :
# we often see sequences of several metadata changes - so
# we schedule a lazy update to batch them into one ui update.
self.__layoutDirty = True
self.__updateLazily()
def __plugDirtied( self, plug ) :

	# Only input plugs on a visible UI can affect activations/summaries.
	if not self.visible() :
		return
	if plug.direction() != plug.Direction.In :
		return

	self.__activationsDirty = True
	self.__summariesDirty = True
	self.__updateLazily()
# The _Section class provides a simple abstract representation of a hierarchical
# layout. Each section contains a list of widgets to be displayed in that section,
# and an OrderedDict of named subsections.
class _Section( object ) :

	def __init__( self, _parent, _fullName = "" ) :

		self.__parent = _parent
		self.fullName = _fullName

		self.clear()

	# Returns the named child section, creating it on first access.
	def subsection( self, name ) :

		existing = self.subsections.get( name )
		if existing is not None :
			return existing

		if self.fullName :
			childFullName = self.fullName + "." + name
		else :
			childFullName = name

		child = _Section( self.__parent, childFullName )
		self.subsections[name] = child

		return child

	def clear( self ) :

		self.widgets = []
		self.subsections = collections.OrderedDict()
		self.summary = ""

	# Persists a piece of (non-persistent) UI state as metadata on the
	# node or plug this layout represents.
	def saveState( self, name, value ) :

		if isinstance( self.__parent, Gaffer.Node ) :
			Gaffer.Metadata.registerNodeValue( self.__parent, self.__stateName( name ), value, persistent = False )
		else :
			Gaffer.Metadata.registerPlugValue( self.__parent, self.__stateName( name ), value, persistent = False )

	def restoreState( self, name ) :

		if isinstance( self.__parent, Gaffer.Node ) :
			return Gaffer.Metadata.nodeValue( self.__parent, self.__stateName( name ) )
		else :
			return Gaffer.Metadata.plugValue( self.__parent, self.__stateName( name ) )

	def __stateName( self, name ) :

		return "layout:section:" + self.fullName + ":" + name
# The PlugLayout class deals with all the details of plugs, metadata and
# signals to define an abstract layout in terms of _Sections. It then
# delegates to the _Layout classes to create an actual layout in terms
# of Widgets. This allows us to present different layouts based on whether
# or not the parent is a node (tabbed layout) or a plug (collapsible layout).
class _Layout( GafferUI.Widget ) :

	def __init__( self, topLevelWidget, orientation, **kw ) :

		GafferUI.Widget.__init__( self, topLevelWidget, **kw )

		# Stored privately; subclasses query it via orientation().
		self.__orientation = orientation

	def orientation( self ) :

		return self.__orientation

	# Subclasses must rebuild themselves to display `section`.
	def update( self, section ) :

		raise NotImplementedError
class _TabLayout( _Layout ) :

	def __init__( self, orientation, **kw ) :

		# A column holding the section's own widgets above a tabbed
		# container holding one tab per subsection.
		self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )

		_Layout.__init__( self, self.__mainColumn, orientation, **kw )

		with self.__mainColumn :
			self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 )
			self.__tabbedContainer = GafferUI.TabbedContainer()

		# Connection kept so we can block it while rebuilding the tabs.
		self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect(
			Gaffer.WeakMethod( self.__currentTabChanged )
		)

	def update( self, section ) :

		self.__section = section
		self.__widgetsColumn[:] = section.widgets

		existingTabs = collections.OrderedDict()
		for tab in self.__tabbedContainer[:] :
			existingTabs[self.__tabbedContainer.getLabel( tab )] = tab

		updatedTabs = collections.OrderedDict()
		for name, subsection in section.subsections.items() :
			tab = existingTabs.get( name )
			if tab is None :
				# New subsection : build a scrolled collapsible layout for it,
				# scrolling only along the layout's own orientation.
				tab = GafferUI.ScrolledContainer( borderWidth = 8 )
				if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
					tab.setHorizontalMode( GafferUI.ScrolledContainer.ScrollMode.Never )
				else :
					tab.setVerticalMode( GafferUI.ScrolledContainer.ScrollMode.Never )
				tab.setChild( _CollapsibleLayout( self.orientation() ) )
			tab.getChild().update( subsection )
			updatedTabs[name] = tab

		if existingTabs.keys() != updatedTabs.keys() :
			# The set of tabs changed : rebuild them, blocking the changed
			# signal so transient selection isn't saved as state.
			with Gaffer.BlockedConnection( self.__currentTabChangedConnection ) :
				del self.__tabbedContainer[:]
				for name, tab in updatedTabs.items() :
					self.__tabbedContainer.append( tab, label = name )
				if not len( existingTabs ) :
					# First update : restore the previously saved current tab.
					currentTabIndex = self.__section.restoreState( "currentTab" ) or 0
					if currentTabIndex < len( self.__tabbedContainer ) :
						self.__tabbedContainer.setCurrent( self.__tabbedContainer[currentTabIndex] )

		self.__widgetsColumn.setVisible( len( section.widgets ) )
		self.__tabbedContainer.setVisible( len( self.__tabbedContainer ) )

	def __currentTabChanged( self, tabbedContainer, currentTab ) :

		# Persist the selection so it can be restored next time.
		self.__section.saveState( "currentTab", tabbedContainer.index( currentTab ) )
class _CollapsibleLayout( _Layout ) :

	def __init__( self, orientation, **kw ) :

		self.__column = GafferUI.ListContainer( orientation, spacing = 4 )

		_Layout.__init__( self, self.__column, orientation, **kw )

		self.__collapsibles = {} # Indexed by section name

	def update( self, section ) :

		widgets = list( section.widgets )

		for name, subsection in section.subsections.items() :

			collapsible = self.__collapsibles.get( name )
			if collapsible is None :

				collapsible = GafferUI.Collapsible( name, _CollapsibleLayout( self.orientation() ), borderWidth = 2, collapsed = True )

				# Corner widget shows the section summary next to the label.
				collapsible.setCornerWidget( GafferUI.Label(), True )
				## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
				# way of controlling size behaviours for all widgets in the public API.
				collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed )

				if subsection.restoreState( "collapsed" ) is False :
					collapsible.setCollapsed( False )

				# Connection stored on the collapsible itself so its lifetime
				# matches the widget's.
				collapsible.__stateChangedConnection = collapsible.stateChangedSignal().connect(
					functools.partial( Gaffer.WeakMethod( self.__collapsibleStateChanged ), subsection = subsection )
				)

				self.__collapsibles[name] = collapsible

			collapsible.getChild().update( subsection )
			collapsible.getCornerWidget().setText(
				# NOTE: the conditional applies to the whole concatenation,
				# so an empty summary yields "".
				"<small>" + "&nbsp;( " + subsection.summary + " )</small>" if subsection.summary else ""
			)

			widgets.append( collapsible )

		self.__column[:] = widgets

	def __collapsibleStateChanged( self, collapsible, subsection ) :

		# Persist collapsed state so it can be restored next session.
		subsection.saveState( "collapsed", collapsible.getCollapsed() )
| bsd-3-clause |
jmartinm/InvenioAuthorLists | modules/bibindex/lib/bibindex_engine.py | 3 | 84073 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibIndex indexing engine implementation. See bibindex executable for entry point.
"""
__revision__ = "$Id$"
import os
import re
import sys
import time
import urllib2
import logging
from invenio.config import \
CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS, \
CFG_BIBINDEX_CHARS_PUNCTUATION, \
CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY, \
CFG_BIBINDEX_MIN_WORD_LENGTH, \
CFG_BIBINDEX_REMOVE_HTML_MARKUP, \
CFG_BIBINDEX_REMOVE_LATEX_MARKUP, \
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES, \
CFG_BIBINDEX_SYNONYM_KBRS, \
CFG_CERN_SITE, CFG_INSPIRE_SITE, \
CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES, \
CFG_BIBINDEX_SPLASH_PAGES, \
CFG_SOLR_URL
from invenio.websubmit_config import CFG_WEBSUBMIT_BEST_FORMATS_TO_EXTRACT_TEXT_FROM
from invenio.bibindex_engine_config import CFG_MAX_MYSQL_THREADS, \
CFG_MYSQL_THREAD_TIMEOUT, \
CFG_CHECK_MYSQL_THREADS
from invenio.bibindex_engine_tokenizer import BibIndexFuzzyNameTokenizer, \
BibIndexExactNameTokenizer
from invenio.bibdocfile import bibdocfile_url_p, \
bibdocfile_url_to_bibdoc, normalize_format, \
download_url, guess_format_from_url, BibRecDocs
from invenio.websubmit_file_converter import convert_file, get_file_converter_logger
from invenio.search_engine import perform_request_search, strip_accents, \
wash_index_term, lower_index_term, get_index_stemming_language, \
get_synonym_terms
from invenio.dbquery import run_sql, DatabaseError, serialize_via_marshal, \
deserialize_via_marshal
from invenio.bibindex_engine_stopwords import is_stopword
from invenio.bibindex_engine_stemmer import stem
from invenio.bibtask import task_init, write_message, get_datetime, \
task_set_option, task_get_option, task_get_task_param, task_update_status, \
task_update_progress, task_sleep_now_if_required
from invenio.intbitset import intbitset
from invenio.errorlib import register_exception
from invenio.htmlutils import remove_html_markup, get_links_in_html_page
from invenio.textutils import wash_for_utf8
from invenio.search_engine_utils import get_fieldvalues
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
# FIXME: journal tag and journal pubinfo standard format are defined here:
# (site-specific: CERN/INSPIRE keep journal info in MARC 773, others in 909C4)
if CFG_CERN_SITE:
    CFG_JOURNAL_TAG = '773__%'
    CFG_JOURNAL_PUBINFO_STANDARD_FORM = "773__p 773__v (773__y) 773__c"
elif CFG_INSPIRE_SITE:
    CFG_JOURNAL_TAG = '773__%'
    CFG_JOURNAL_PUBINFO_STANDARD_FORM = "773__p,773__v,773__c"
else:
    CFG_JOURNAL_TAG = '909C4%'
    CFG_JOURNAL_PUBINFO_STANDARD_FORM = "909C4p 909C4v (909C4y) 909C4c"

## precompile some often-used regexp for speed reasons:
re_subfields = re.compile('\$\$\w')  # MARC subfield markers, e.g. "$$a"
re_block_punctuation_begin = re.compile(r"^"+CFG_BIBINDEX_CHARS_PUNCTUATION+"+")
re_block_punctuation_end = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION+"+$")
re_punctuation = re.compile(CFG_BIBINDEX_CHARS_PUNCTUATION)
re_separators = re.compile(CFG_BIBINDEX_CHARS_ALPHANUMERIC_SEPARATORS)
# optional sign, magnitude, and d/h/m/s unit, e.g. "-2d"
re_datetime_shift = re.compile("([-\+]{0,1})([\d]+)([dhms])")
# arXiv identifiers of the form "arxiv:NNNN.NNNN" (input already lowercased)
re_arxiv = re.compile(r'^arxiv:\d\d\d\d\.\d\d\d\d')

nb_char_in_line = 50 # for verbose pretty printing
chunksize = 1000 # default size of chunks that the records will be treated by
base_process_size = 4500 # process base size
# NOTE(review): appears to hold the word table currently being updated,
# presumably for flushing on interruption -- confirm against task callbacks.
_last_word_table = None
def list_union(list1, list2):
    """Return the union of the two lists, as a list of unique
    elements (iteration order unspecified)."""
    seen = {}
    for element in list1 + list2:
        seen[element] = 1
    return seen.keys()
## safety function for killing slow DB threads:
def kill_sleepy_mysql_threads(max_threads=CFG_MAX_MYSQL_THREADS, thread_timeout=CFG_MYSQL_THREAD_TIMEOUT):
"""Check the number of DB threads and if there are more than
MAX_THREADS of them, lill all threads that are in a sleeping
state for more than THREAD_TIMEOUT seconds. (This is useful
for working around the the max_connection problem that appears
during indexation in some not-yet-understood cases.) If some
threads are to be killed, write info into the log file.
"""
res = run_sql("SHOW FULL PROCESSLIST")
if len(res) > max_threads:
for row in res:
r_id, dummy, dummy, dummy, r_command, r_time, dummy, dummy = row
if r_command == "Sleep" and int(r_time) > thread_timeout:
run_sql("KILL %s", (r_id,))
write_message("WARNING: too many DB threads, killing thread %s" % r_id, verbose=1)
return
def get_associated_subfield_value(recID, tag, value, associated_subfield_code):
    """Return value of ASSOCIATED_SUBFIELD_CODE, if it exists, for record
    RECID and TAG of value VALUE. Used by fulltext indexer only.
    Note: TAG must be 6 characters long (tag+ind1+ind2+sfcode),
    otherwise an empty string is returned.
    FIXME: what if many tag values have the same value but different
    associated_subfield_code? Better use bibrecord library for this.
    """
    if len(tag) != 6:
        return ""
    bibXXx = "bib" + tag[0] + tag[1] + "x"
    bibrec_bibXXx = "bibrec_" + bibXXx
    query = """SELECT bb.field_number, b.tag, b.value FROM %s AS b, %s AS bb
    WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id AND tag LIKE
    %%s%%""" % (bibXXx, bibrec_bibXXx)
    res = run_sql(query, (recID, tag[:-1]))
    # first pass: find the field instance matching tag/value
    # (the last matching instance wins, as in the original behaviour)
    field_number = -1
    for instance_number, row_tag, row_value in res:
        if row_tag == tag and row_value == value:
            field_number = instance_number
    # second pass: pick the associated subfield from that instance
    if field_number > 0:
        wanted_tag = tag[:-1] + associated_subfield_code
        for instance_number, row_tag, row_value in res:
            if instance_number == field_number and row_tag == wanted_tag:
                return row_value
    return ""
def get_field_tags(field):
    """Return the list of MARC tags configured for the logical field
    FIELD, ordered by descending score; [] for unknown fields.
    Example: field='author', output=['100__%','700__%']."""
    # fix: removed the unused local `out` the original left behind
    query = """SELECT t.value FROM tag AS t, field_tag AS ft, field AS f
               WHERE f.code=%s AND ft.id_field=f.id AND t.id=ft.id_tag
               ORDER BY ft.score DESC"""
    res = run_sql(query, (field, ))
    return [row[0] for row in res]
def get_words_from_journal_tag(recID, tag):
    """
    Special procedure to extract words from journal tags. Joins
    title/volume/year/page into a standard form that is also used for
    citations.
    """
    # get all journal tags/subfields:
    bibXXx = "bib" + tag[0] + tag[1] + "x"
    bibrec_bibXXx = "bibrec_" + bibXXx
    query = """SELECT bb.field_number,b.tag,b.value FROM %s AS b, %s AS bb
               WHERE bb.id_bibrec=%%s
               AND bb.id_bibxxx=b.id AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
    res = run_sql(query, (recID, tag))
    # construct journal pubinfo, grouped by field instance:
    dpubinfos = {}
    for nb_instance, subfield, value in res:
        if subfield.endswith("c"):
            # delete pageend if value is pagestart-pageend
            # FIXME: pages may not be in 'c' subfield
            value = value.split('-', 1)[0]
        # fix: use setdefault instead of dict.has_key() (removed in Python 3)
        dpubinfos.setdefault(nb_instance, {})[subfield] = value
    # construct standard format:
    lwords = []
    for dpubinfo in dpubinfos.values():
        # index all journal subfields separately
        for val in dpubinfo.values():
            lwords.append(val)
        # index journal standard format:
        # (fix: loop variable renamed so it no longer shadows the `tag` parameter)
        pubinfo = CFG_JOURNAL_PUBINFO_STANDARD_FORM
        for subfield_tag, val in dpubinfo.items():
            pubinfo = pubinfo.replace(subfield_tag, val)
        if CFG_JOURNAL_TAG[:-1] not in pubinfo:
            # every placeholder was substituted, so index the full pubinfo;
            # otherwise some subfield was missing and we skip it
            lwords.append(pubinfo)
    # return list of words and pubinfos:
    return lwords
def get_author_canonical_ids_for_recid(recID):
    """
    Return list of author canonical IDs (e.g. `J.Ellis.1') for the
    given record. Done by consulting BibAuthorID module.
    """
    from invenio.bibauthorid_personid_tables_utils import get_persons_from_recids
    res = get_persons_from_recids([recID])
    if res is None:
        ## BibAuthorID is not enabled
        return []
    dummy_dpersons, dpersoninfos = res
    lwords = []
    for person_info in dpersoninfos.values():
        canonical_id = person_info.get('canonical_id', '')
        if canonical_id:
            lwords.append(canonical_id)
    return lwords
def get_words_from_date_tag(datestring, stemming_language=None):
    """
    Special procedure to index words from tags storing date-like
    information in format YYYY or YYYY-MM or YYYY-MM-DD. Each
    whitespace-separated token is indexed as-is, together with each of
    its leading prefixes (YYYY, YYYY-MM), but never a standalone MM
    or DD.
    """
    terms = []
    for token in datestring.split():
        # maybe there are whitespaces, so break these too
        terms.append(token)
        components = token.split('-')
        for prefix_length in range(1, len(components)):
            terms.append("-".join(components[:prefix_length]))
    return terms
def get_words_from_fulltext(url_direct_or_indirect, stemming_language=None):
    """Returns all the words contained in the document specified by
    URL_DIRECT_OR_INDIRECT with the words being split by various
    SRE_SEPARATORS regexp set earlier. If FORCE_FILE_EXTENSION is
    set (e.g. to "pdf", then treat URL_DIRECT_OR_INDIRECT as a PDF
    file. (This is interesting to index Indico for example.) Note
    also that URL_DIRECT_OR_INDIRECT may be either a direct URL to
    the fulltext file or an URL to a setlink-like page body that
    presents the links to be indexed. In the latter case the
    URL_DIRECT_OR_INDIRECT is parsed to extract actual direct URLs
    to fulltext documents, for all knows file extensions as
    specified by global CONV_PROGRAMS config variable.
    """
    re_perform_ocr = re.compile(CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES)
    write_message("... reading fulltext files from %s started" % url_direct_or_indirect, verbose=2)
    try:
        if bibdocfile_url_p(url_direct_or_indirect):
            # Internal document: extract (possibly OCRed) text via bibdocfile.
            write_message("... %s is an internal document" % url_direct_or_indirect, verbose=2)
            bibdoc = bibdocfile_url_to_bibdoc(url_direct_or_indirect)
            # OCR only docnames matching the configured pattern:
            perform_ocr = bool(re_perform_ocr.match(bibdoc.get_docname()))
            write_message("... will extract words from %s (docid: %s) %s" % (bibdoc.get_docname(), bibdoc.get_id(), perform_ocr and 'with OCR' or ''), verbose=2)
            if not bibdoc.has_text(require_up_to_date=True):
                bibdoc.extract_text(perform_ocr=perform_ocr)
            if CFG_SOLR_URL:
                # we are relying on Solr to provide full-text indexing, so do
                # nothing here (FIXME: dispatch indexing to Solr)
                return []
            else:
                return get_words_from_phrase(bibdoc.get_text(), stemming_language)
        else:
            # External URL, possibly a splash page listing the real files.
            if CFG_BIBINDEX_FULLTEXT_INDEX_LOCAL_FILES_ONLY:
                write_message("... %s is external URL but indexing only local files" % url_direct_or_indirect, verbose=2)
                return []
            write_message("... %s is an external URL" % url_direct_or_indirect, verbose=2)
            urls_to_index = set()
            # Collect direct URLs advertised by any matching splash page:
            for splash_re, url_re in CFG_BIBINDEX_SPLASH_PAGES.iteritems():
                if re.match(splash_re, url_direct_or_indirect):
                    write_message("... %s is a splash page (%s)" % (url_direct_or_indirect, splash_re), verbose=2)
                    html = urllib2.urlopen(url_direct_or_indirect).read()
                    urls = get_links_in_html_page(html)
                    write_message("... found these URLs in %s splash page: %s" % (url_direct_or_indirect, ", ".join(urls)), verbose=3)
                    for url in urls:
                        if re.match(url_re, url):
                            write_message("... will index %s (matched by %s)" % (url, url_re), verbose=2)
                            urls_to_index.add(url)
            if not urls_to_index:
                # no splash page matched: index the URL itself
                urls_to_index.add(url_direct_or_indirect)
            write_message("... will extract words from %s" % ', '.join(urls_to_index), verbose=2)
            words = {}
            for url in urls_to_index:
                tmpdoc = download_url(url)
                file_converter_logger = get_file_converter_logger()
                old_logging_level = file_converter_logger.getEffectiveLevel()
                if task_get_task_param("verbose") > 3:
                    file_converter_logger.setLevel(logging.DEBUG)
                try:
                    try:
                        # convert to plain text, read it, and tokenize
                        tmptext = convert_file(tmpdoc, output_format='.txt')
                        text = open(tmptext).read()
                        os.remove(tmptext)
                        if CFG_SOLR_URL:
                            # we are relying on Solr to provide full-text indexing, so do
                            # nothing here (FIXME: dispatch indexing to Solr)
                            tmpwords = []
                        else:
                            tmpwords = get_words_from_phrase(text, stemming_language)
                        words.update(dict(map(lambda x: (x, 1), tmpwords)))
                    except Exception, e:
                        # best-effort: log and continue with the other URLs
                        message = 'ERROR: it\'s impossible to correctly extract words from %s referenced by %s: %s' % (url, url_direct_or_indirect, e)
                        register_exception(prefix=message, alert_admin=True)
                        write_message(message, stream=sys.stderr)
                finally:
                    # always clean up the downloaded file and logger level
                    os.remove(tmpdoc)
                    if task_get_task_param("verbose") > 3:
                        file_converter_logger.setLevel(old_logging_level)
            return words.keys()
    except Exception, e:
        message = 'ERROR: it\'s impossible to correctly extract words from %s: %s' % (url_direct_or_indirect, e)
        register_exception(prefix=message, alert_admin=True)
        write_message(message, stream=sys.stderr)
        return []
latex_markup_re = re.compile(r"\\begin(\[.+?\])?\{.+?\}|\\end\{.+?}|\\\w+(\[.+?\])?\{(?P<inside1>.*?)\}|\{\\\w+ (?P<inside2>.*?)\}")
def remove_latex_markup(phrase):
    """Strip LaTeX commands from PHRASE, keeping the textual content
    of their braced arguments (e.g. r"\\textbf{x}" -> "x")."""
    pieces = []
    cursor = 0
    for match in latex_markup_re.finditer(phrase):
        pieces.append(phrase[cursor:match.start()])
        pieces.append(match.group('inside1') or match.group('inside2') or '')
        cursor = match.end()
    pieces.append(phrase[cursor:])
    return ''.join(pieces)
def get_nothing_from_phrase(phrase, stemming_language=None):
    """Tokenizer stub used when a tag should not be indexed at all
    (such as when trying to extract phrases from 8564_u): it always
    yields no terms."""
    return []
def swap_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
    """Atomically swap reindexed temporary table with the original one.
    Delete the now-old one."""
    write_message("Putting new tmp index tables for id %s into production" % index_id)
    # A single multi-table RENAME is atomic in MySQL, so searchers never
    # observe a half-swapped set of word/pair/phrase tables.
    run_sql(
        "RENAME TABLE " +
        "idxWORD%02dR TO old_idxWORD%02dR," % (index_id, index_id) +
        "%sidxWORD%02dR TO idxWORD%02dR," % (reindex_prefix, index_id, index_id) +
        "idxWORD%02dF TO old_idxWORD%02dF," % (index_id, index_id) +
        "%sidxWORD%02dF TO idxWORD%02dF," % (reindex_prefix, index_id, index_id) +
        "idxPAIR%02dR TO old_idxPAIR%02dR," % (index_id, index_id) +
        "%sidxPAIR%02dR TO idxPAIR%02dR," % (reindex_prefix, index_id, index_id) +
        "idxPAIR%02dF TO old_idxPAIR%02dF," % (index_id, index_id) +
        "%sidxPAIR%02dF TO idxPAIR%02dF," % (reindex_prefix, index_id, index_id) +
        "idxPHRASE%02dR TO old_idxPHRASE%02dR," % (index_id, index_id) +
        "%sidxPHRASE%02dR TO idxPHRASE%02dR," % (reindex_prefix, index_id, index_id) +
        "idxPHRASE%02dF TO old_idxPHRASE%02dF," % (index_id, index_id) +
        "%sidxPHRASE%02dF TO idxPHRASE%02dF;" % (reindex_prefix, index_id, index_id)
    )
    # only after the swap succeeded do we drop the superseded tables
    write_message("Dropping old index tables for id %s" % index_id)
    run_sql("DROP TABLE old_idxWORD%02dR, old_idxWORD%02dF, old_idxPAIR%02dR, old_idxPAIR%02dF, old_idxPHRASE%02dR, old_idxPHRASE%02dF" % (index_id, index_id, index_id, index_id, index_id, index_id)
    )
def init_temporary_reindex_tables(index_id, reindex_prefix="tmp_"):
    """Create reindexing temporary tables."""
    write_message("Creating new tmp index tables for id %s" % index_id)
    # One forward (F: term -> hitlist) and one reverse (R: record ->
    # termlist) table each, for the word, pair and phrase indexes.
    run_sql("""DROP TABLE IF EXISTS %sidxWORD%02dF""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxWORD%02dF (
                id mediumint(9) unsigned NOT NULL auto_increment,
                term varchar(50) default NULL,
                hitlist longblob,
                PRIMARY KEY (id),
                UNIQUE KEY term (term)
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    run_sql("""DROP TABLE IF EXISTS %sidxWORD%02dR""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxWORD%02dR (
                id_bibrec mediumint(9) unsigned NOT NULL,
                termlist longblob,
                type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
                PRIMARY KEY (id_bibrec,type)
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    run_sql("""DROP TABLE IF EXISTS %sidxPAIR%02dF""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxPAIR%02dF (
                id mediumint(9) unsigned NOT NULL auto_increment,
                term varchar(100) default NULL,
                hitlist longblob,
                PRIMARY KEY (id),
                UNIQUE KEY term (term)
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    run_sql("""DROP TABLE IF EXISTS %sidxPAIR%02dR""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxPAIR%02dR (
                id_bibrec mediumint(9) unsigned NOT NULL,
                termlist longblob,
                type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
                PRIMARY KEY (id_bibrec,type)
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    run_sql("""DROP TABLE IF EXISTS %sidxPHRASE%02dF""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxPHRASE%02dF (
                id mediumint(9) unsigned NOT NULL auto_increment,
                term text default NULL,
                hitlist longblob,
                PRIMARY KEY (id),
                KEY term (term(50))
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    run_sql("""DROP TABLE IF EXISTS %sidxPHRASE%02dR""" % (reindex_prefix, index_id))
    run_sql("""CREATE TABLE %sidxPHRASE%02dR (
                id_bibrec mediumint(9) unsigned NOT NULL default '0',
                termlist longblob,
                type enum('CURRENT','FUTURE','TEMPORARY') NOT NULL default 'CURRENT',
                PRIMARY KEY (id_bibrec,type)
                ) ENGINE=MyISAM""" % (reindex_prefix, index_id))
    # reset the index timestamp so the reindex covers everything
    run_sql("UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00' WHERE id=%s", (index_id,))
# matches $...$ inline formulas and \[...\] display formulas
latex_formula_re = re.compile(r'\$.*?\$|\\\[.*?\\\]')
def get_words_from_phrase(phrase, stemming_language=None):
    """Return list of words found in PHRASE. Note that the phrase is
    split into groups depending on the alphanumeric characters and
    punctuation characters definition present in the config file.
    """
    words = {}
    formulas = []
    if CFG_BIBINDEX_REMOVE_HTML_MARKUP and phrase.find("</") > -1:
        phrase = remove_html_markup(phrase)
    if CFG_BIBINDEX_REMOVE_LATEX_MARKUP:
        # formulas are captured verbatim before the markup is stripped,
        # and indexed as standalone terms at the end
        formulas = latex_formula_re.findall(phrase)
        phrase = remove_latex_markup(phrase)
        phrase = latex_formula_re.sub(' ', phrase)
    phrase = wash_for_utf8(phrase)
    phrase = lower_index_term(phrase)
    # 1st split phrase into blocks according to whitespace
    for block in strip_accents(phrase).split():
        # 2nd remove leading/trailing punctuation and add block:
        block = re_block_punctuation_begin.sub("", block)
        block = re_block_punctuation_end.sub("", block)
        if block:
            if stemming_language:
                block = apply_stemming_and_stopwords_and_length_check(block, stemming_language)
            if block:
                words[block] = 1
                if re_arxiv.match(block):
                    # special case for blocks like `arXiv:1007.5048' where
                    # we would like to index the part after the colon
                    # regardless of dot or other punctuation characters:
                    words[block.split(':', 1)[1]] = 1
            # 3rd break each block into subblocks according to punctuation and add subblocks:
            for subblock in re_punctuation.split(block):
                if stemming_language:
                    subblock = apply_stemming_and_stopwords_and_length_check(subblock, stemming_language)
                if subblock:
                    words[subblock] = 1
                    # 4th break each subblock into alphanumeric groups and add groups:
                    for alphanumeric_group in re_separators.split(subblock):
                        if stemming_language:
                            alphanumeric_group = apply_stemming_and_stopwords_and_length_check(alphanumeric_group, stemming_language)
                        if alphanumeric_group:
                            words[alphanumeric_group] = 1
    for block in formulas:
        words[block] = 1
    return words.keys()
def get_pairs_from_phrase(phrase, stemming_language=None):
    """Return list of successive word pairs found in PHRASE. Note that
    the phrase is split into groups depending on the alphanumeric
    characters and punctuation characters definition present in the
    config file.
    """
    words = {}
    if CFG_BIBINDEX_REMOVE_HTML_MARKUP and phrase.find("</") > -1:
        phrase = remove_html_markup(phrase)
    if CFG_BIBINDEX_REMOVE_LATEX_MARKUP:
        phrase = remove_latex_markup(phrase)
        phrase = latex_formula_re.sub(' ', phrase)
    phrase = wash_for_utf8(phrase)
    phrase = lower_index_term(phrase)
    # 1st split phrase into blocks according to whitespace
    last_word = ''
    for block in strip_accents(phrase).split():
        # 2nd remove leading/trailing punctuation and add block:
        block = re_block_punctuation_begin.sub("", block)
        block = re_block_punctuation_end.sub("", block)
        if block:
            if stemming_language:
                block = apply_stemming_and_stopwords_and_length_check(block, stemming_language)
            # 3rd break each block into subblocks according to punctuation and add subblocks:
            for subblock in re_punctuation.split(block):
                if stemming_language:
                    subblock = apply_stemming_and_stopwords_and_length_check(subblock, stemming_language)
                if subblock:
                    # 4th break each subblock into alphanumeric groups and add groups:
                    for alphanumeric_group in re_separators.split(subblock):
                        if stemming_language:
                            alphanumeric_group = apply_stemming_and_stopwords_and_length_check(alphanumeric_group, stemming_language)
                        if alphanumeric_group:
                            # pair each term with its predecessor
                            if last_word:
                                words['%s %s' % (last_word, alphanumeric_group)] = 1
                            last_word = alphanumeric_group
    return words.keys()
phrase_delimiter_re = re.compile(r'[\.:;\?\!]')
space_cleaner_re = re.compile(r'\s+')
def get_phrases_from_phrase(phrase, stemming_language=None):
    """Return list of phrases found in PHRASE.

    Phrases are deliberately neither broken into smaller pieces nor
    stemmed, since they are used for the exact style of searching: the
    whole washed phrase is returned as the single token.
    """
    # fix: the original carried a large block of phrase-splitting code
    # after this return statement; it was unreachable and has been removed.
    return [wash_for_utf8(phrase)]
def get_fuzzy_authors_from_phrase(phrase, stemming_language=None):
    """
    Return list of fuzzy phrase-tokens suitable for storing into
    author phrase index.
    """
    return BibIndexFuzzyNameTokenizer().tokenize(phrase)
def get_exact_authors_from_phrase(phrase, stemming_language=None):
    """
    Return list of exact phrase-tokens suitable for storing into
    exact author phrase index.
    """
    return BibIndexExactNameTokenizer().tokenize(phrase)
def get_author_family_name_words_from_phrase(phrase, stemming_language=None):
    """
    Return list of words from author family names, not his/her first
    names. The phrase is assumed to be the full author name. This is
    useful for CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES.
    """
    family_names = {}
    # first, treat everything before the first comma as a surname:
    if ',' in phrase:
        family_names[phrase.split(',', 1)[0]] = 1
    # second, let the fuzzy author tokenizer suggest surname variants:
    for variant in get_fuzzy_authors_from_phrase(phrase, stemming_language):
        if ',' in variant:
            family_names[variant.split(',', 1)[0]] = 1
    # finally, extract the words out of the collected surnames:
    family_name_words = {}
    for family_name in family_names.keys():
        for word in get_words_from_phrase(family_name, stemming_language):
            family_name_words[word] = 1
    return family_name_words.keys()
def apply_stemming_and_stopwords_and_length_check(word, stemming_language):
    """Return WORD after applying stemming and stopword and length checks;
    return "" when the word is filtered out entirely.
    See the config file in order to influence these.
    """
    # stopwords are dropped outright:
    if is_stopword(word):
        return ""
    # as are words shorter than the configured minimum:
    if len(word) < CFG_BIBINDEX_MIN_WORD_LENGTH:
        return ""
    # stem word, when configured so:
    return stem(word, stemming_language) if stemming_language else word
def remove_subfields(s):
    # Replaces each MARC subfield marker (e.g. "$$c") in S by a single
    # space, e.g. 'foo $$c bar' becomes 'foo bar' (modulo surrounding spaces).
    "Removes subfields from string, e.g. 'foo $$c bar' becomes 'foo bar'."
    return re_subfields.sub(' ', s)
def get_index_id_from_index_name(index_name):
    """Return the words/phrase index id for INDEX_NAME.
    Returns 0 in case there is no words table for this index.
    Example: field='author', output=4.

    (fix: the original docstring wrongly claimed an empty string was
    returned for unknown indexes; the function has always returned 0.)
    """
    query = """SELECT w.id FROM idxINDEX AS w
               WHERE w.name=%s LIMIT 1"""
    res = run_sql(query, (index_name, ), 1)
    return res[0][0] if res else 0
def get_index_name_from_index_id(index_id):
    """Returns the words/phrase index name for INDEXID.
    Returns '' in case there is no words table for this indexid.
    Example: field=9, output='fulltext'."""
    res = run_sql("SELECT name FROM idxINDEX WHERE id=%s", (index_id, ))
    return res[0][0] if res else ''
def get_index_tags(indexname):
    """Returns the list of tags that are indexed inside INDEXNAME.
    Returns empty list in case there are no tags indexed in this index.
    Note: uses get_field_tags() defined before.
    Example: field='author', output=['100__%', '700__%']."""
    query = """SELECT f.code FROM idxINDEX AS w, idxINDEX_field AS wf,
               field AS f WHERE w.name=%s AND w.id=wf.id_idxINDEX
               AND f.id=wf.id_field"""
    tags = []
    for (field_code,) in run_sql(query, (indexname, )):
        tags.extend(get_field_tags(field_code))
    return tags
def get_all_indexes():
    """Returns the list of the names of all defined words indexes.
    Returns empty list in case there are no tags indexed in this index.
    Example: output=['global', 'author']."""
    return [name for (name,) in run_sql("""SELECT name FROM idxINDEX""")]
def split_ranges(parse_string):
    """Parse a comma-separated list of "N" or "N-M" tokens into a list
    of [low, high] record ID ranges, with bounds normalised so that
    low <= high."""
    recID_ranges = []
    for chunk in parse_string.split(","):
        bounds = chunk.split("-")
        if len(bounds) == 1:
            value = int(bounds[0])
            recID_ranges.append([value, value])
        else:
            low, high = int(bounds[0]), int(bounds[1])
            if low > high:
                # sanity check: normalise reversed ranges
                low, high = high, low
            recID_ranges.append([low, high])
    return recID_ranges
def get_word_tables(tables):
    """Given a comma-separated string of index names, return a list of
    (index_id, index_name, index_tags) tuples. With an empty argument,
    return the tuple for every defined index."""
    word_tables = []
    if tables:
        for index in tables.split(","):
            index_id = get_index_id_from_index_name(index)
            if index_id:
                word_tables.append((index_id, index, get_index_tags(index)))
            else:
                write_message("Error: There is no %s words table." % index, sys.stderr)
    else:
        for index in get_all_indexes():
            word_tables.append((get_index_id_from_index_name(index), index, get_index_tags(index)))
    return word_tables
def get_date_range(var):
    """Return the (low, high) datetime tuple encoded in the
    comma-separated string VAR; the high bound is None when only one
    date is given, and both are None for malformed input."""
    limits = var.split(",")
    if len(limits) == 1:
        return get_datetime(limits[0]), None
    if len(limits) == 2:
        return get_datetime(limits[0]), get_datetime(limits[1])
    return None, None
def create_range_list(res):
    """Compress an ascending sequence of recIDs into [low, high] ranges.

    RES is expected in ascending numerical order; consecutive ids are
    folded into one range.  Returns [] for empty input (or a falsy
    first element)."""
    if not res or not res[0]:
        return []
    ranges = [[res[0], res[0]]]
    for rec_id in res[1:]:
        last = ranges[-1]
        if rec_id == last[1] + 1:
            last[1] = rec_id
        else:
            ranges.append([rec_id, rec_id])
    return ranges
def beautify_range_list(range_list):
    """Return a sorted, non-overlapping, maximal range list.

    Overlapping and adjacent [low, high] ranges are merged.  The input
    is sorted first so that a range bridging two previously disjoint
    ranges merges all of them; the old single-pass version could leave
    mergeable neighbours unmerged (e.g. [[1,2],[5,6],[3,4]] stayed as
    [[1,4],[5,6]]).  Fresh list objects are returned, so callers that
    rebind the result (as put_into_db does) are unaffected.
    """
    ret_list = []
    for low, high in sorted(range_list):
        if ret_list and low <= ret_list[-1][1] + 1:
            # overlaps or touches the previous range: extend it
            if high > ret_list[-1][1]:
                ret_list[-1][1] = high
        else:
            ret_list.append([low, high])
    return ret_list
def truncate_index_table(index_name):
    """Properly truncate the given index: reset its last_updated stamp
    and empty its forward/reverse word and phrase tables."""
    index_id = get_index_id_from_index_name(index_name)
    if not index_id:
        return
    write_message('Truncating %s index table in order to reindex.' % index_name, verbose=2)
    run_sql("UPDATE idxINDEX SET last_updated='0000-00-00 00:00:00' WHERE id=%s", (index_id,))
    for pattern in ('idxWORD%02dF', 'idxWORD%02dR', 'idxPHRASE%02dF', 'idxPHRASE%02dR'):
        run_sql("TRUNCATE " + pattern % index_id)
def update_index_last_updated(index_id, starting_time=None):
    """Update the last_updated column of the index table in the database.

    The task *starting* time is stored so that, if the task is
    interrupted during record download, the records get reindexed on
    the next run.  Does nothing when starting_time is None."""
    if starting_time is not None:
        write_message("updating last_updated to %s..." % starting_time, verbose=9)
        return run_sql("UPDATE idxINDEX SET last_updated=%s WHERE id=%s",
                       (starting_time, index_id,))
    return None
#def update_text_extraction_date(first_recid, last_recid):
#"""for all the bibdoc connected to the specified recid, set
#the text_extraction_date to the task_starting_time."""
#run_sql("UPDATE bibdoc JOIN bibrec_bibdoc ON id=id_bibdoc SET text_extraction_date=%s WHERE id_bibrec BETWEEN %s AND %s", (task_get_task_param('task_starting_time'), first_recid, last_recid))
class WordTable:
    """In-memory accumulator for one search index table pair.

    Pending changes live in self.value as {term: {recID: sign}} where
    sign is +1 (add) or -1 (delete); put_into_db() merges them into the
    forward table (e.g. idxWORD01F, term -> hitlist) and the reverse
    table (e.g. idxWORD01R, recID -> termlist).  During a flush the
    reverse-table rows move through the states
    CURRENT -> TEMPORARY (old rows) and FUTURE -> CURRENT (new rows),
    so an interrupted run leaves detectable TEMPORARY/FUTURE rows that
    chk_recID_range()/fix_recID_range() can diagnose and repair.
    """
    def __init__(self, index_name, index_id, fields_to_index, table_name_pattern, default_get_words_fnc, tag_to_words_fnc_map, wash_index_terms=50, is_fulltext_index=False):
        """Creates words table instance.
        @param index_name: the index name
        @param index_id: the index integer identifier
        @param fields_to_index: a list of MARC tags to index
        @param table_name_pattern: i.e. idxWORD%02dF or idxPHRASE%02dF
        @param default_get_words_fnc: the default function called to extract words from a metadata
        @param tag_to_words_fnc_map: a mapping to specify a particular function to
          extract words from a particular metadata field (such as 8564_u)
        @param wash_index_terms: do we wash index terms, and if yes (when >0),
          how many characters do we keep in the index terms; see
          max_char_length parameter of wash_index_term()
        @param is_fulltext_index: whether this index covers fulltext files;
          affects date-based record selection in add_recIDs_by_date()
        """
        self.index_name = index_name
        self.index_id = index_id
        self.tablename = table_name_pattern % index_id
        self.recIDs_in_mem = []
        self.fields_to_index = fields_to_index
        self.value = {}  # pending {term: {recID: +1/-1}} signs, flushed by put_into_db()
        self.stemming_language = get_index_stemming_language(index_id)
        self.is_fulltext_index = is_fulltext_index
        self.wash_index_terms = wash_index_terms
        # tagToFunctions mapping.  It offers an indirection level necessary for
        # indexing fulltext.  The default is get_words_from_phrase.
        self.tag_to_words_fnc_map = tag_to_words_fnc_map
        self.default_get_words_fnc = default_get_words_fnc
        if self.stemming_language and self.tablename.startswith('idxWORD'):
            write_message('%s has stemming enabled, language %s' % (self.tablename, self.stemming_language))
    def get_field(self, recID, tag):
        """Returns list of values of the MARC-21 'tag' fields for the
        record 'recID'."""
        out = []
        # table names are derived from the tag's first two digits, not
        # from user input
        bibXXx = "bib" + tag[0] + tag[1] + "x"
        bibrec_bibXXx = "bibrec_" + bibXXx
        query = """SELECT value FROM %s AS b, %s AS bb
                WHERE bb.id_bibrec=%%s AND bb.id_bibxxx=b.id
                AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
        res = run_sql(query, (recID, tag))
        for row in res:
            out.append(row[0])
        return out
    def clean(self):
        """Forget all pending in-memory term/recID signs."""
        self.value = {}
    def put_into_db(self, mode="normal"):
        """Updates the current words table in the corresponding DB
           idxFOO table.  Mode 'normal' means normal execution,
           mode 'emergency' means words index reverting to old state.
        """
        write_message("%s %s wordtable flush started" % (self.tablename, mode))
        write_message('...updating %d words into %s started' % \
                (len(self.value), self.tablename))
        task_update_progress("%s flushed %d/%d words" % (self.tablename, 0, len(self.value)))
        self.recIDs_in_mem = beautify_range_list(self.recIDs_in_mem)
        if mode == "normal":
            # step 1: mark the affected reverse-table rows TEMPORARY
            for group in self.recIDs_in_mem:
                query = """UPDATE %sR SET type='TEMPORARY' WHERE id_bibrec
                BETWEEN %%s AND %%s AND type='CURRENT'""" % self.tablename[:-1]
                write_message(query % (group[0], group[1]), verbose=9)
                run_sql(query, (group[0], group[1]))
        # step 2: merge the pending signs into the forward table,
        # one term at a time
        nb_words_total = len(self.value)
        nb_words_report = int(nb_words_total/10.0)
        nb_words_done = 0
        for word in self.value.keys():
            self.put_word_into_db(word)
            nb_words_done += 1
            if nb_words_report != 0 and ((nb_words_done % nb_words_report) == 0):
                write_message('......processed %d/%d words' % (nb_words_done, nb_words_total))
                task_update_progress("%s flushed %d/%d words" % (self.tablename, nb_words_done, nb_words_total))
        write_message('...updating %d words into %s ended' % \
                (nb_words_total, self.tablename))
        write_message('...updating reverse table %sR started' % self.tablename[:-1])
        if mode == "normal":
            # step 3: promote FUTURE rows to CURRENT and drop the
            # TEMPORARY (old) rows
            for group in self.recIDs_in_mem:
                query = """UPDATE %sR SET type='CURRENT' WHERE id_bibrec
                BETWEEN %%s AND %%s AND type='FUTURE'""" % self.tablename[:-1]
                write_message(query % (group[0], group[1]), verbose=9)
                run_sql(query, (group[0], group[1]))
                query = """DELETE FROM %sR WHERE id_bibrec
                BETWEEN %%s AND %%s AND type='TEMPORARY'""" % self.tablename[:-1]
                write_message(query % (group[0], group[1]), verbose=9)
                run_sql(query, (group[0], group[1]))
                #if self.is_fulltext_index:
                    #update_text_extraction_date(group[0], group[1])
            write_message('End of updating wordTable into %s' % self.tablename, verbose=9)
        elif mode == "emergency":
            # revert: TEMPORARY rows become CURRENT again, FUTURE rows
            # are dropped
            for group in self.recIDs_in_mem:
                query = """UPDATE %sR SET type='CURRENT' WHERE id_bibrec
                BETWEEN %%s AND %%s AND type='TEMPORARY'""" % self.tablename[:-1]
                write_message(query % (group[0], group[1]), verbose=9)
                run_sql(query, (group[0], group[1]))
                query = """DELETE FROM %sR WHERE id_bibrec
                BETWEEN %%s AND %%s AND type='FUTURE'""" % self.tablename[:-1]
                write_message(query % (group[0], group[1]), verbose=9)
                run_sql(query, (group[0], group[1]))
            write_message('End of emergency flushing wordTable into %s' % self.tablename, verbose=9)
        write_message('...updating reverse table %sR ended' % self.tablename[:-1])
        self.clean()
        self.recIDs_in_mem = []
        write_message("%s %s wordtable flush ended" % (self.tablename, mode))
        task_update_progress("%s flush ended" % (self.tablename))
    def load_old_recIDs(self, word):
        """Load the existing hitlist for WORD from the forward index
        table, as an intbitset, or None when the word is not there."""
        query = "SELECT hitlist FROM %s WHERE term=%%s" % self.tablename
        res = run_sql(query, (word,))
        if res:
            return intbitset(res[0][0])
        else:
            return None
    def merge_with_old_recIDs(self, word, set):
        """Merge the system numbers stored in memory (hash of recIDs with value +1 or -1
        according to whether to add/delete them) with those stored in the database index
        and received in set universe of recIDs for the given word.
        Return False in case no change was done to SET, return True in case SET
        was changed.

        NOTE(review): the parameter name 'set' shadows the builtin; kept
        for interface compatibility.
        """
        oldset = intbitset(set)
        set.update_with_signs(self.value[word])
        return set != oldset
    def put_word_into_db(self, word):
        """Flush a single word to the database and delete it from memory"""
        set = self.load_old_recIDs(word)
        if set is not None: # merge the word recIDs found in memory:
            if not self.merge_with_old_recIDs(word,set):
                # nothing to update:
                write_message("......... unchanged hitlist for ``%s''" % word, verbose=9)
                pass
            else:
                # yes there were some new words:
                write_message("......... updating hitlist for ``%s''" % word, verbose=9)
                run_sql("UPDATE %s SET hitlist=%%s WHERE term=%%s" % self.tablename,
                    (set.fastdump(), word))
        else: # the word is new, will create new set:
            write_message("......... inserting hitlist for ``%s''" % word, verbose=9)
            set = intbitset(self.value[word].keys())
            try:
                run_sql("INSERT INTO %s (term, hitlist) VALUES (%%s, %%s)" % self.tablename,
                    (word, set.fastdump()))
            except Exception, e:
                ## We send this exception to the admin only when we are not
                ## already repairing the problem.
                register_exception(prefix="Error when putting the term '%s' into db (hitlist=%s): %s\n" % (repr(word), set, e), alert_admin=(task_get_option('cmd') != 'repair'))
        if not set: # never store empty words
            run_sql("DELETE from %s WHERE term=%%s" % self.tablename,
                    (word,))
        del self.value[word]
    def display(self):
        """Display the in-memory word table (term -> {recID: sign})."""
        keys = self.value.keys()
        keys.sort()
        for k in keys:
            write_message("%s: %s" % (k, self.value[k]))
    def count(self):
        """Returns the number of words in the in-memory table."""
        return len(self.value)
    def info(self):
        """Prints some information on the words table."""
        write_message("The words table contains %d words." % self.count())
    def lookup_words(self, word=""):
        """Lookup WORD in the in-memory words table; prompt on stdin
        when no word is given."""
        if not word:
            done = 0
            while not done:
                try:
                    word = raw_input("Enter word: ")
                    done = 1
                except (EOFError, KeyboardInterrupt):
                    return
        if self.value.has_key(word):
            write_message("The word '%s' is found %d times." \
                % (word, len(self.value[word])))
        else:
            write_message("The word '%s' does not exist in the word file."\
                              % word)
    def add_recIDs(self, recIDs, opt_flush):
        """Fetches records which id in the recIDs range list and adds
        them to the wordTable.  The recIDs range list is of the form:
        [[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].
        Flushes to the database every OPT_FLUSH records (and at most
        every `chunksize` records per chunk).
        """
        global chunksize, _last_word_table
        flush_count = 0
        records_done = 0
        records_to_go = 0
        for arange in recIDs:
            records_to_go = records_to_go + arange[1] - arange[0] + 1
        time_started = time.time() # will measure profile time
        for arange in recIDs:
            i_low = arange[0]
            chunksize_count = 0
            while i_low <= arange[1]:
                # calculate chunk group of recIDs and treat it:
                i_high = min(i_low+opt_flush-flush_count-1,arange[1])
                i_high = min(i_low+chunksize-chunksize_count-1, i_high)
                try:
                    self.chk_recID_range(i_low, i_high)
                except StandardError, e:
                    # unrepairable inconsistency: flush what we have and abort
                    write_message("Exception caught: %s" % e, sys.stderr)
                    register_exception(alert_admin=True)
                    task_update_status("ERROR")
                    self.put_into_db()
                    sys.exit(1)
                write_message("%s adding records #%d-#%d started" % \
                        (self.tablename, i_low, i_high))
                if CFG_CHECK_MYSQL_THREADS:
                    kill_sleepy_mysql_threads()
                task_update_progress("%s adding recs %d-%d" % (self.tablename, i_low, i_high))
                # re-index: first remove the old words of the range,
                # then add the fresh ones
                self.del_recID_range(i_low, i_high)
                just_processed = self.add_recID_range(i_low, i_high)
                flush_count = flush_count + i_high - i_low + 1
                chunksize_count = chunksize_count + i_high - i_low + 1
                records_done = records_done + just_processed
                write_message("%s adding records #%d-#%d ended  " % \
                        (self.tablename, i_low, i_high))
                if chunksize_count >= chunksize:
                    chunksize_count = 0
                # flush if necessary:
                if flush_count >= opt_flush:
                    self.put_into_db()
                    self.clean()
                    write_message("%s backing up" % (self.tablename))
                    flush_count = 0
                    self.log_progress(time_started,records_done,records_to_go)
                # iterate:
                i_low = i_high + 1
        if flush_count > 0:
            self.put_into_db()
            self.log_progress(time_started,records_done,records_to_go)
    def add_recIDs_by_date(self, dates, opt_flush):
        """Add records that were modified between DATES[0] and DATES[1].
           If DATES is not set, then add records that were modified since
           the last update of the index.
        """
        if not dates:
            # no dates given: start from the index's last_updated stamp
            table_id = self.tablename[-3:-1]
            query = """SELECT last_updated FROM idxINDEX WHERE id=%s"""
            res = run_sql(query, (table_id, ))
            if not res:
                return
            if not res[0][0]:
                dates = ("0000-00-00", None)
            else:
                dates = (res[0][0], None)
        if dates[1] is None:
            res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
                              WHERE b.modification_date >= %s""",
                           (dates[0],)))
            if self.is_fulltext_index:
                res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id WHERE text_extraction_date <= modification_date AND modification_date >= %s AND status<>'DELETED'""", (dates[0], )))
        elif dates[0] is None:
            res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
                              WHERE b.modification_date <= %s""",
                           (dates[1],)))
            if self.is_fulltext_index:
                res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id WHERE text_extraction_date <= modification_date AND modification_date <= %s AND status<>'DELETED'""", (dates[1], )))
        else:
            res = intbitset(run_sql("""SELECT b.id FROM bibrec AS b
                              WHERE b.modification_date >= %s AND
                                    b.modification_date <= %s""",
                           (dates[0], dates[1])))
            if self.is_fulltext_index:
                res |= intbitset(run_sql("""SELECT id_bibrec FROM bibrec_bibdoc JOIN bibdoc ON id_bibdoc=id WHERE text_extraction_date <= modification_date AND modification_date >= %s AND modification_date <= %s AND status<>'DELETED'""", (dates[0], dates[1], )))
        alist = create_range_list(list(res))
        if not alist:
            write_message( "No new records added. %s is up to date" % self.tablename)
        else:
            self.add_recIDs(alist, opt_flush)
        # special case of author indexes where we need to re-index
        # those records that were affected by changed BibAuthorID
        # attributions:
        if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
            from invenio.bibauthorid_personid_tables_utils import get_recids_affected_since
            # dates[1] is ignored, since BibAuthorID API does not offer upper limit search
            alist = create_range_list(get_recids_affected_since(dates[0]))
            if not alist:
                write_message( "No new records added by author canonical IDs. %s is up to date" % self.tablename)
            else:
                self.add_recIDs(alist, opt_flush)
    def add_recID_range(self, recID1, recID2):
        """Add records from RECID1 to RECID2 (inclusive) to the
        in-memory word table and write their FUTURE reverse-table rows.
        Returns the number of records for which words were collected."""
        wlist = {}
        self.recIDs_in_mem.append([recID1,recID2])
        # special case of author indexes where we also add author
        # canonical IDs:
        if self.index_name in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
            for recID in range(recID1, recID2 + 1):
                if not wlist.has_key(recID):
                    wlist[recID] = []
                wlist[recID] = list_union(get_author_canonical_ids_for_recid(recID),
                                          wlist[recID])
        # special case of journal index:
        if self.fields_to_index == [CFG_JOURNAL_TAG]:
            # FIXME: quick hack for the journal index; a special
            # treatment where we need to associate more than one
            # subfield into indexed term
            for recID in range(recID1, recID2 + 1):
                new_words = get_words_from_journal_tag(recID, self.fields_to_index[0])
                if not wlist.has_key(recID):
                    wlist[recID] = []
                wlist[recID] = list_union(new_words, wlist[recID])
        else:
            # usual tag-by-tag indexing:
            for tag in self.fields_to_index:
                get_words_function = self.tag_to_words_fnc_map.get(tag, self.default_get_words_fnc)
                bibXXx = "bib" + tag[0] + tag[1] + "x"
                bibrec_bibXXx = "bibrec_" + bibXXx
                query = """SELECT bb.id_bibrec,b.value FROM %s AS b, %s AS bb
                        WHERE bb.id_bibrec BETWEEN %%s AND %%s
                        AND bb.id_bibxxx=b.id AND tag LIKE %%s""" % (bibXXx, bibrec_bibXXx)
                res = run_sql(query, (recID1, recID2, tag))
                if tag == '8564_u':
                    ## FIXME: Quick hack to be sure that hidden files are
                    ## actually indexed.
                    res = set(res)
                    for recid in xrange(int(recID1), int(recID2) + 1):
                        for bibdocfile in BibRecDocs(recid).list_latest_files():
                            res.add((recid, bibdocfile.get_url()))
                for row in res:
                    recID,phrase = row
                    if not wlist.has_key(recID):
                        wlist[recID] = []
                    new_words = get_words_function(phrase, stemming_language=self.stemming_language) # ,self.separators
                    wlist[recID] = list_union(new_words, wlist[recID])
        # lookup index-time synonyms:
        if CFG_BIBINDEX_SYNONYM_KBRS.has_key(self.index_name):
            if len(wlist) == 0: return 0
            recIDs = wlist.keys()
            for recID in recIDs:
                for word in wlist[recID]:
                    word_synonyms = get_synonym_terms(word,
                                                      CFG_BIBINDEX_SYNONYM_KBRS[self.index_name][0],
                                                      CFG_BIBINDEX_SYNONYM_KBRS[self.index_name][1])
                    if word_synonyms:
                        wlist[recID] = list_union(word_synonyms, wlist[recID])
        # were there some words for these recIDs found?
        if len(wlist) == 0: return 0
        recIDs = wlist.keys()
        for recID in recIDs:
            # was this record marked as deleted?
            if "DELETED" in self.get_field(recID, "980__c"):
                wlist[recID] = []
                write_message("... record %d was declared deleted, removing its word list" % recID, verbose=9)
            write_message("... record %d, termlist: %s" % (recID, wlist[recID]), verbose=9)
        # put words into reverse index table with FUTURE status:
        for recID in recIDs:
            run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'FUTURE')" % self.tablename[:-1],
                    (recID, serialize_via_marshal(wlist[recID])))
            # ... and, for new records, enter the CURRENT status as empty:
            try:
                run_sql("INSERT INTO %sR (id_bibrec,termlist,type) VALUES (%%s,%%s,'CURRENT')" % self.tablename[:-1],
                        (recID, serialize_via_marshal([])))
            except DatabaseError:
                # okay, it's an already existing record, no problem
                pass
        # put words into memory word list:
        put = self.put
        for recID in recIDs:
            for w in wlist[recID]:
                put(recID, w, 1)
        return len(recIDs)
    def log_progress(self, start, done, todo):
        """Calculate progress and store it.
        start: start time,
        done: records processed,
        todo: total number of records"""
        time_elapsed = time.time() - start
        # consistency check
        if time_elapsed == 0 or done > todo:
            return
        time_recs_per_min = done/(time_elapsed/60.0)
        # NOTE(review): '%1.f' was probably meant to be '%.1f'; kept as-is
        # since it is a runtime string
        write_message("%d records took %.1f seconds to complete.(%1.f recs/min)"\
            % (done, time_elapsed, time_recs_per_min))
        if time_recs_per_min:
            write_message("Estimated runtime: %.1f minutes" % \
                    ((todo-done)/time_recs_per_min))
    def put(self, recID, word, sign):
        """Adds/deletes a word to the word list."""
        try:
            if self.wash_index_terms:
                word = wash_index_term(word, self.wash_index_terms)
            if self.value.has_key(word):
                # the word 'word' exist already: update sign
                self.value[word][recID] = sign
            else:
                self.value[word] = {recID: sign}
        except:
            # NOTE(review): bare except silently swallows all errors here
            write_message("Error: Cannot put word %s with sign %d for recID %s." % (word, sign, recID))
    def del_recIDs(self, recIDs):
        """Fetches records which id is in the recIDs range list and
        *deletes* their words from the wordTable, flushing at the end.
        The recIDs range list is of the form:
        [[i1_low,i1_high],[i2_low,i2_high], ..., [iN_low,iN_high]].

        NOTE(review): 'count' is accumulated (off by one per inclusive
        range) but never used.
        """
        count = 0
        for arange in recIDs:
            self.del_recID_range(arange[0],arange[1])
            count = count + arange[1] - arange[0]
        self.put_into_db()
    def del_recID_range(self, low, high):
        """Deletes records with 'recID' system number between low
           and high from memory words index table."""
        write_message("%s fetching existing words for records #%d-#%d started" % \
                (self.tablename, low, high), verbose=3)
        self.recIDs_in_mem.append([low,high])
        query = """SELECT id_bibrec,termlist FROM %sR as bb WHERE bb.id_bibrec
        BETWEEN %%s AND %%s""" % (self.tablename[:-1])
        recID_rows = run_sql(query, (low, high))
        for recID_row in recID_rows:
            recID = recID_row[0]
            wlist = deserialize_via_marshal(recID_row[1])
            for word in wlist:
                self.put(recID, word, -1)
        write_message("%s fetching existing words for records #%d-#%d ended" % \
                (self.tablename, low, high), verbose=3)
    def report_on_table_consistency(self):
        """Check reverse words index tables (e.g. idxWORD01R) for
        interesting states such as 'TEMPORARY' state.
        Prints small report (no of words, no of bad words).
        Returns the number of bad (non-CURRENT) index records."""
        # find number of words:
        query = """SELECT COUNT(*) FROM %s""" % (self.tablename)
        res = run_sql(query, None, 1)
        if res:
            nb_words = res[0][0]
        else:
            nb_words = 0
        # find number of records:
        query = """SELECT COUNT(DISTINCT(id_bibrec)) FROM %sR""" % (self.tablename[:-1])
        res = run_sql(query, None, 1)
        if res:
            nb_records = res[0][0]
        else:
            nb_records = 0
        # report stats:
        write_message("%s contains %d words from %d records" % (self.tablename, nb_words, nb_records))
        # find possible bad states in reverse tables:
        query = """SELECT COUNT(DISTINCT(id_bibrec)) FROM %sR WHERE type <> 'CURRENT'""" % (self.tablename[:-1])
        res = run_sql(query)
        if res:
            nb_bad_records = res[0][0]
        else:
            nb_bad_records = 999999999
        if nb_bad_records:
            write_message("EMERGENCY: %s needs to repair %d of %d index records" % \
                (self.tablename, nb_bad_records, nb_records))
        else:
            write_message("%s is in consistent state" % (self.tablename))
        return nb_bad_records
    def repair(self, opt_flush):
        """Repair the whole table, fixing reverse-table rows left in a
        non-CURRENT state by an interrupted run; flushes in 'emergency'
        mode every OPT_FLUSH records."""
        # find possible bad states in reverse tables:
        query = """SELECT COUNT(DISTINCT(id_bibrec)) FROM %sR WHERE type <> 'CURRENT'""" % (self.tablename[:-1])
        res = run_sql(query, None, 1)
        if res:
            nb_bad_records = res[0][0]
        else:
            nb_bad_records = 0
        if nb_bad_records == 0:
            return
        query = """SELECT id_bibrec FROM %sR WHERE type <> 'CURRENT'""" \
                % (self.tablename[:-1])
        res = intbitset(run_sql(query))
        recIDs = create_range_list(list(res))
        flush_count = 0
        records_done = 0
        records_to_go = 0
        for arange in recIDs:
            records_to_go = records_to_go + arange[1] - arange[0] + 1
        time_started = time.time() # will measure profile time
        for arange in recIDs:
            i_low = arange[0]
            chunksize_count = 0
            while i_low <= arange[1]:
                # calculate chunk group of recIDs and treat it:
                i_high = min(i_low+opt_flush-flush_count-1,arange[1])
                i_high = min(i_low+chunksize-chunksize_count-1, i_high)
                try:
                    self.fix_recID_range(i_low, i_high)
                except StandardError, e:
                    write_message("Exception caught: %s" % e, sys.stderr)
                    register_exception(alert_admin=True)
                    task_update_status("ERROR")
                    self.put_into_db()
                    sys.exit(1)
                flush_count = flush_count + i_high - i_low + 1
                chunksize_count = chunksize_count + i_high - i_low + 1
                records_done = records_done + i_high - i_low + 1
                if chunksize_count >= chunksize:
                    chunksize_count = 0
                # flush if necessary:
                if flush_count >= opt_flush:
                    self.put_into_db("emergency")
                    self.clean()
                    flush_count = 0
                    self.log_progress(time_started,records_done,records_to_go)
                # iterate:
                i_low = i_high + 1
        if flush_count > 0:
            self.put_into_db("emergency")
            self.log_progress(time_started,records_done,records_to_go)
        write_message("%s inconsistencies repaired." % self.tablename)
    def chk_recID_range(self, low, high):
        """Check if the reverse index table is in proper state for the
        given recID range; raise StandardError when any non-CURRENT row
        is found (then 'bibindex --repair' is needed)."""
        ## check db
        query = """SELECT COUNT(*) FROM %sR WHERE type <> 'CURRENT'
        AND id_bibrec BETWEEN %%s AND %%s""" % self.tablename[:-1]
        res = run_sql(query, (low, high), 1)
        if res[0][0]==0:
            write_message("%s for %d-%d is in consistent state" % (self.tablename,low,high))
            return # okay, words table is consistent
        ## inconsistency detected!
        write_message("EMERGENCY: %s inconsistencies detected..." % self.tablename)
        error_message = "Errors found. You should check consistency of the " \
                "%s - %sR tables.\nRunning 'bibindex --repair' is " \
                "recommended." % (self.tablename, self.tablename[:-1])
        write_message("EMERGENCY: " + error_message, stream=sys.stderr)
        raise StandardError, error_message
    def fix_recID_range(self, low, high):
        """Try to fix reverse index database consistency (e.g. table idxWORD01R) in the low,high doc-id range.
        Possible states for a recID follow:
        CUR TMP FUT: very bad things have happened: warn!
        CUR TMP    : very bad things have happened: warn!
        CUR     FUT: delete FUT (crash before flushing)
        CUR        : database is ok
            TMP FUT: add TMP to memory and del FUT from memory
                     flush (revert to old state)
            TMP    : very bad things have happened: warn!
                FUT: very bad things have happened: warn!
        Raises StandardError when an unrepairable state is found.
        """
        state = {}
        query = "SELECT id_bibrec,type FROM %sR WHERE id_bibrec BETWEEN %%s AND %%s"\
                % self.tablename[:-1]
        res = run_sql(query, (low, high))
        # collect the set of row states per recID
        for row in res:
            if not state.has_key(row[0]):
                state[row[0]]=[]
            state[row[0]].append(row[1])
        ok = 1 # will hold info on whether we will be able to repair
        for recID in state.keys():
            if not 'TEMPORARY' in state[recID]:
                if 'FUTURE' in state[recID]:
                    if 'CURRENT' not in state[recID]:
                        write_message("EMERGENCY: Index record %d is in inconsistent state. Can't repair it." % recID)
                        ok = 0
                    else:
                        # CUR + FUT: crash happened before flushing;
                        # drop everything and let reindexing redo it
                        write_message("EMERGENCY: Inconsistency in index record %d detected" % recID)
                        query = """DELETE FROM %sR
                        WHERE id_bibrec=%%s""" % self.tablename[:-1]
                        run_sql(query, (recID, ))
                        write_message("EMERGENCY: Inconsistency in record %d repaired." % recID)
            else:
                if 'FUTURE' in state[recID] and not 'CURRENT' in state[recID]:
                    # TMP + FUT: revert by replaying TMP with +1 and FUT
                    # with -1 into memory, to be flushed in emergency mode
                    self.recIDs_in_mem.append([recID,recID])
                    # Get the words file
                    query = """SELECT type,termlist FROM %sR
                    WHERE id_bibrec=%%s""" % self.tablename[:-1]
                    write_message(query, verbose=9)
                    res = run_sql(query, (recID, ))
                    for row in res:
                        wlist = deserialize_via_marshal(row[1])
                        write_message("Words are %s " % wlist, verbose=9)
                        if row[0] == 'TEMPORARY':
                            sign = 1
                        else:
                            sign = -1
                        for word in wlist:
                            self.put(recID, word, sign)
                else:
                    write_message("EMERGENCY: %s for %d is in inconsistent "
                        "state. Couldn't repair it." % (self.tablename,
                            recID), stream=sys.stderr)
                    ok = 0
        if not ok:
            error_message = "Unrepairable errors found. You should check " \
                    "consistency of the %s - %sR tables. Deleting affected " \
                    "TEMPORARY and FUTURE entries from these tables is " \
                    "recommended; see the BibIndex Admin Guide." % \
                    (self.tablename, self.tablename[:-1])
            write_message("EMERGENCY: " + error_message, stream=sys.stderr)
            raise StandardError, error_message
def main():
    """Construct and register the BibIndex task with BibSched.

    Declares the command-line options, help text and the callbacks
    (parameter elaboration, option checks, stop handler, core runner)
    via task_init(); actual work happens in task_run_core()."""
    task_init(authorization_action='runbibindex',
              authorization_msg="BibIndex Task Submission",
              description="""Examples:
\t%s -a -i 234-250,293,300-500 -u admin@localhost
\t%s -a -w author,fulltext -M 8192 -v3
\t%s -d -m +4d -A on --flush=10000\n""" % ((sys.argv[0],) * 3), help_specific_usage=""" Indexing options:
 -a, --add\t\tadd or update words for selected records
 -d, --del\t\tdelete words for selected records
 -i, --id=low[-high]\t\tselect according to doc recID
 -m, --modified=from[,to]\tselect according to modification date
 -c, --collection=c1[,c2]\tselect according to collection
 -R, --reindex\treindex the selected indexes from scratch

 Repairing options:
 -k, --check\t\tcheck consistency for all records in the table(s)
 -r, --repair\t\ttry to repair all records in the table(s)

 Specific options:
 -w, --windex=w1[,w2]\tword/phrase indexes to consider (all)
 -M, --maxmem=XXX\tmaximum memory usage in kB (no limit)
 -f, --flush=NNN\t\tfull consistent table flush after NNN records (10000)
""",
              version=__revision__,
              specific_params=("adi:m:c:w:krRM:f:", [
                  "add",
                  "del",
                  "id=",
                  "modified=",
                  "collection=",
                  "windex=",
                  "check",
                  "repair",
                  "reindex",
                  "maxmem=",
                  "flush=",
              ]),
              task_stop_helper_fnc=task_stop_table_close_fnc,
              task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
              task_run_fnc=task_run_core,
              task_submit_check_options_fnc=task_submit_check_options)
def task_submit_check_options():
    """Check for options compatibility: --reindex is only allowed
    together with --add and without --id/--collection."""
    if not task_get_option("reindex"):
        return True
    if task_get_option("cmd") != "add" or task_get_option('id') or task_get_option('collection'):
        print >> sys.stderr, "ERROR: You can use --reindex only when adding modified record."
        return False
    return True
def task_submit_elaborate_specific_parameter(key, value, opts, args):
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
self.options['number'] = value
return True
return False
"""
if key in ("-a", "--add"):
task_set_option("cmd", "add")
if ("-x","") in opts or ("--del","") in opts:
raise StandardError, "Can not have --add and --del at the same time!"
elif key in ("-k", "--check"):
task_set_option("cmd", "check")
elif key in ("-r", "--repair"):
task_set_option("cmd", "repair")
elif key in ("-d", "--del"):
task_set_option("cmd", "del")
elif key in ("-i", "--id"):
task_set_option('id', task_get_option('id') + split_ranges(value))
elif key in ("-m", "--modified"):
task_set_option("modified", get_date_range(value))
elif key in ("-c", "--collection"):
task_set_option("collection", value)
elif key in ("-R", "--reindex"):
task_set_option("reindex", True)
elif key in ("-w", "--windex"):
task_set_option("windex", value)
elif key in ("-M", "--maxmem"):
task_set_option("maxmem", int(value))
if task_get_option("maxmem") < base_process_size + 1000:
raise StandardError, "Memory usage should be higher than %d kB" % \
(base_process_size + 1000)
elif key in ("-f", "--flush"):
task_set_option("flush", int(value))
else:
return False
return True
def task_stop_table_close_fnc():
    """ Flush the last open word table to the DB before STOP. """
    global _last_word_table
    if not _last_word_table:
        return
    _last_word_table.put_into_db()
def task_run_core():
"""Runs the task by fetching arguments from the BibSched task queue. This is
what BibSched will be invoking via daemon call.
The task prints Fibonacci numbers for up to NUM on the stdout, and some
messages on stderr.
Return 1 in case of success and 0 in case of failure."""
global _last_word_table
if task_get_option("cmd") == "check":
wordTables = get_word_tables(task_get_option("windex"))
for index_id, index_name, index_tags in wordTables:
if index_name == 'year' and CFG_INSPIRE_SITE:
fnc_get_words_from_phrase = get_words_from_date_tag
elif index_name in ('author', 'firstauthor') and \
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES:
fnc_get_words_from_phrase = get_author_family_name_words_from_phrase
else:
fnc_get_words_from_phrase = get_words_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern='idxWORD%02dF',
default_get_words_fnc=fnc_get_words_from_phrase,
tag_to_words_fnc_map={'8564_u': get_words_from_fulltext},
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if index_name in ('author', 'firstauthor') and \
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES:
fnc_get_pairs_from_phrase = get_pairs_from_phrase # FIXME
else:
fnc_get_pairs_from_phrase = get_pairs_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern='idxPAIR%02dF',
default_get_words_fnc=fnc_get_pairs_from_phrase,
tag_to_words_fnc_map={'8564_u': get_nothing_from_phrase},
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if index_name in ('author', 'firstauthor'):
fnc_get_phrases_from_phrase = get_fuzzy_authors_from_phrase
elif index_name in ('exactauthor', 'exactfirstauthor'):
fnc_get_phrases_from_phrase = get_exact_authors_from_phrase
else:
fnc_get_phrases_from_phrase = get_phrases_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern='idxPHRASE%02dF',
default_get_words_fnc=fnc_get_phrases_from_phrase,
tag_to_words_fnc_map={'8564_u': get_nothing_from_phrase},
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
# Let's work on single words!
wordTables = get_word_tables(task_get_option("windex"))
for index_id, index_name, index_tags in wordTables:
is_fulltext_index = index_name == 'fulltext'
reindex_prefix = ""
if task_get_option("reindex"):
reindex_prefix = "tmp_"
init_temporary_reindex_tables(index_id, reindex_prefix)
if index_name == 'year' and CFG_INSPIRE_SITE:
fnc_get_words_from_phrase = get_words_from_date_tag
elif index_name in ('author', 'firstauthor') and \
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES:
fnc_get_words_from_phrase = get_author_family_name_words_from_phrase
else:
fnc_get_words_from_phrase = get_words_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern=reindex_prefix + 'idxWORD%02dF',
default_get_words_fnc=fnc_get_words_from_phrase,
tag_to_words_fnc_map={'8564_u': get_words_from_fulltext},
is_fulltext_index=is_fulltext_index,
wash_index_terms=50)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id"):
wordTable.del_recIDs(task_get_option("id"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
elif task_get_option("cmd") == "add":
if task_get_option("id"):
wordTable.add_recIDs(task_get_option("id"), task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.add_recIDs(recIDs_range, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
wordTable.add_recIDs_by_date(task_get_option("modified"), task_get_option("flush"))
## here we used to update last_updated info, if run via automatic mode;
## but do not update here anymore, since idxPHRASE will be acted upon later
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception(alert_admin=True)
task_update_status("ERROR")
if _last_word_table:
_last_word_table.put_into_db()
sys.exit(1)
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on pairs now
if index_name in ('author', 'firstauthor') and \
CFG_BIBINDEX_AUTHOR_WORD_INDEX_EXCLUDE_FIRST_NAMES:
fnc_get_pairs_from_phrase = get_pairs_from_phrase # FIXME
else:
fnc_get_pairs_from_phrase = get_pairs_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern=reindex_prefix + 'idxPAIR%02dF',
default_get_words_fnc=fnc_get_pairs_from_phrase,
tag_to_words_fnc_map={'8564_u': get_nothing_from_phrase},
wash_index_terms=100)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id"):
wordTable.del_recIDs(task_get_option("id"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
elif task_get_option("cmd") == "add":
if task_get_option("id"):
wordTable.add_recIDs(task_get_option("id"), task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.add_recIDs(recIDs_range, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
wordTable.add_recIDs_by_date(task_get_option("modified"), task_get_option("flush"))
# let us update last_updated timestamp info, if run via automatic mode:
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
task_update_status("ERROR")
if _last_word_table:
_last_word_table.put_into_db()
sys.exit(1)
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
# Let's work on phrases now
if index_name in ('author', 'firstauthor'):
fnc_get_phrases_from_phrase = get_fuzzy_authors_from_phrase
elif index_name in ('exactauthor', 'exactfirstauthor'):
fnc_get_phrases_from_phrase = get_exact_authors_from_phrase
else:
fnc_get_phrases_from_phrase = get_phrases_from_phrase
wordTable = WordTable(index_name=index_name,
index_id=index_id,
fields_to_index=index_tags,
table_name_pattern=reindex_prefix + 'idxPHRASE%02dF',
default_get_words_fnc=fnc_get_phrases_from_phrase,
tag_to_words_fnc_map={'8564_u': get_nothing_from_phrase},
wash_index_terms=0)
_last_word_table = wordTable
wordTable.report_on_table_consistency()
try:
if task_get_option("cmd") == "del":
if task_get_option("id"):
wordTable.del_recIDs(task_get_option("id"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.del_recIDs(recIDs_range)
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Missing IDs of records to delete from " \
"index %s." % wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
elif task_get_option("cmd") == "add":
if task_get_option("id"):
wordTable.add_recIDs(task_get_option("id"), task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("collection"):
l_of_colls = task_get_option("collection").split(",")
recIDs = perform_request_search(c=l_of_colls)
recIDs_range = []
for recID in recIDs:
recIDs_range.append([recID,recID])
wordTable.add_recIDs(recIDs_range, task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
wordTable.add_recIDs_by_date(task_get_option("modified"), task_get_option("flush"))
# let us update last_updated timestamp info, if run via automatic mode:
update_index_last_updated(index_id, task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
elif task_get_option("cmd") == "repair":
wordTable.repair(task_get_option("flush"))
task_sleep_now_if_required(can_stop_too=True)
else:
error_message = "Invalid command found processing %s" % \
wordTable.tablename
write_message(error_message, stream=sys.stderr)
raise StandardError, error_message
except StandardError, e:
write_message("Exception caught: %s" % e, sys.stderr)
register_exception()
task_update_status("ERROR")
if _last_word_table:
_last_word_table.put_into_db()
sys.exit(1)
wordTable.report_on_table_consistency()
task_sleep_now_if_required(can_stop_too=True)
if task_get_option("reindex"):
swap_temporary_reindex_tables(index_id, reindex_prefix)
update_index_last_updated(index_id, task_get_task_param('task_starting_time'))
task_sleep_now_if_required(can_stop_too=True)
_last_word_table = None
return True
## import optional modules:
# psyco is an optional accelerator module: bind the hottest functions when
# it is available and silently continue otherwise.
try:
    import psyco
    psyco.bind(get_words_from_phrase)
    psyco.bind(WordTable.merge_with_old_recIDs)
except:
    # best-effort: any failure (missing module, unsupported platform) is ignored
    pass
### okay, here we go:
if __name__ == '__main__':
    main()
| gpl-2.0 |
opennode/opennode-management | opennode/oms/endpoint/httprest/auth.py | 2 | 8753 | import json
import hmac
import logging
import time
from base64 import urlsafe_b64encode as encodestring, urlsafe_b64decode as decodestring
from grokcore.component import GlobalUtility, context, name
from grokcore.security import require
from twisted.internet import defer
from twisted.cred.credentials import UsernamePassword
from twisted.cred.error import UnauthorizedLogin
from twisted.web.guard import BasicCredentialFactory
from zope.component import getUtility
from zope.interface import Interface, implements
from opennode.oms.config import get_config
from opennode.oms.model.model.root import OmsRoot
from opennode.oms.endpoint.httprest.base import HttpRestView
from opennode.oms.endpoint.httprest.root import BadRequest, Unauthorized, Forbidden
from opennode.oms.security.authentication import checkers, KeystoneChecker
from opennode.oms.util import blocking_yield
log = logging.getLogger(__name__)
class IHttpRestAuthenticationUtility(Interface):
    """Zope interface for the HTTP/REST authentication utility."""
    def get_basic_auth_credentials(request):
        """Returns basic auth credentials object for a given request, or None"""
    def authenticate(request, credentials, basic_auth=False):
        """Performs authentication, adds response headers in case of success,
        throws HttpStatus exceptions in case of failure. Returns a deferred.
        """
    # XXX: use a principal instead of the credentials
    # NOTE(review): unlike the other declarations here, this one includes
    # ``self`` in the signature -- likely an oversight in the interface.
    def generate_token(self, credentials):
        """Generates a secure token for the given credentials"""
    def get_principal(self, token):
        """Retrieves a principal for a token"""
class HttpRestAuthenticationUtility(GlobalUtility):
    """Default implementation of IHttpRestAuthenticationUtility.

    Tokens are ``urlsafe_b64("<user>:<millis>;<hmac>")`` where the HMAC is
    computed over the "<user>:<millis>" head with the configured key.
    """
    implements(IHttpRestAuthenticationUtility)

    realm = 'OMS'
    token_key = get_config().get('auth', 'token_key')

    def get_token(self, request):
        """Extract the auth token from cookie, header or query parameter
        (checked in that order); return None when absent."""
        cookie = request.getCookie('oms_auth_token')
        if cookie:
            return cookie
        header = request.getHeader('X-OMS-Security-Token')
        if header:
            return header
        param = request.args.get('security_token', [None])[0]
        if param:
            return param

    def emit_token(self, request, token):
        """Attach ``token`` to the response as both a cookie and a header."""
        # Overwrite cookies and headers to avoid duplication, see OMS-101.
        # Note, the implementation is somewhat hackish.
        request.cookies = [cookie for cookie in request.cookies if not cookie.startswith('oms_auth_token')]
        request.addCookie('oms_auth_token', token, path='/')
        if request.responseHeaders.hasHeader('X-OMS-Security-Token'):
            request.responseHeaders.removeHeader('X-OMS-Security-Token')
        request.responseHeaders.addRawHeader('X-OMS-Security-Token', token)

    def get_basic_auth_credentials(self, request):
        """Decode HTTP Basic credentials from the request, if present.

        Raises BadRequest when the Authorization header is malformed.
        """
        basic_auth = request.requestHeaders.getRawHeaders('Authorization', [None])[0]
        if basic_auth:
            bc = BasicCredentialFactory(self.realm)
            try:
                return bc.decode(basic_auth.split(' ')[1], None)
            except Exception:
                # Narrowed from a bare ``except:`` so that SystemExit and
                # KeyboardInterrupt are not swallowed.
                raise BadRequest("The Authorization header was not parsable")

    def get_keystone_auth_credentials(self, request):
        """Return the raw Keystone token from the X-Auth-Token header, or None."""
        keystone_token = request.requestHeaders.getRawHeaders('X-Auth-Token', [None])[0]
        log.info('Detected keystone token')
        log.debug('Token: %s' % keystone_token)
        return keystone_token

    @defer.inlineCallbacks
    def authenticate(self, request, credentials, basic_auth=False):
        """Try each configured checker in turn; emit a token on success.

        Raises Unauthorized (when ``basic_auth``) or Forbidden on failure.
        """
        avatar = None
        if credentials:
            for i in checkers():
                try:
                    log.debug('Authenticating using %s on %s' % (i, credentials.username))
                    avatar = yield i.requestAvatarId(credentials)
                    log.debug('Authentication successful using %s on %s!' % (i, credentials.username))
                    break
                except UnauthorizedLogin:
                    log.warning('Authentication failed with %s on %s!' % (i, credentials.username))
        if avatar:
            # XXX: Can replace with renew_token or vice versa
            token = self.generate_token(credentials)
            self.emit_token(request, token)
            defer.returnValue({'status': 'success', 'token': token})
        else:
            # XXX: Not composable
            if basic_auth:
                raise Unauthorized({'status': 'failed'})
            else:
                raise Forbidden({'status': 'failed'})

    @defer.inlineCallbacks
    def authenticate_keystone(self, request, keystone_token):
        """Validate a Keystone token and emit an equivalent OMS token."""
        log.debug('Keystone token: %s' % keystone_token)
        avatar = None
        try:
            # avatar will be username from the keystone token info
            avatar = yield KeystoneChecker().requestAvatarId(keystone_token)
        except UnauthorizedLogin:
            log.warning('Authentication failed with Keystone token')
            log.debug('Token: %s' % keystone_token, exc_info=True)
        if avatar:
            # emulate OMS behaviour - to allow switchover to OMS-based clients
            token = self._generate_token(avatar)
            self.emit_token(request, token)
            defer.returnValue({'status': 'success', 'token': token})
        else:
            raise Unauthorized({'status': 'failed'})

    def generate_token(self, credentials):
        """Generate a signed token for the given credentials object."""
        return self._generate_token(credentials.username)

    def _generate_token(self, username):
        """Build ``b64("<username>:<millis>;<hmac>")`` for ``username``."""
        # TODO: register sessions
        head = '%s:%s' % (username, int(time.time() * 1000))
        signature = hmac.new(self.token_key, head).digest()
        return encodestring('%s;%s' % (head, signature)).strip()

    def get_principal(self, token):
        """Validate ``token`` and return the principal (user) name.

        Returns the anonymous principal for a missing token; raises
        Forbidden for a tampered or expired one.
        """
        if not token:
            return 'oms.anonymous'
        head, signature = decodestring(token).split(';', 1)
        expected_signature = hmac.new(self.token_key, head).digest()
        # SECURITY FIX: use a constant-time comparison instead of ``!=`` so
        # the signature cannot be probed byte-by-byte via timing differences.
        if not hmac.compare_digest(signature, expected_signature):
            raise Forbidden("Invalid authentication token")
        user, timestamp = head.split(':')
        if int(timestamp) / 1000.0 + get_config().getint('auth', 'token_ttl') < time.time():
            raise Forbidden("Expired authentication token (%s s ago)" %
                            (time.time() - int(timestamp) / 1000.0))
        return user

    def renew_token(self, request, token):
        """Re-issue a fresh token for the principal carried by ``token``."""
        new_token = self._generate_token(self.get_principal(token))
        self.emit_token(request, new_token)
class AuthView(HttpRestView):
    """Login endpoint: authenticates the caller and emits a security token."""
    context(OmsRoot)
    name('auth')
    require('oms.nothing')
    realm = 'OMS'
    BASIC_AUTH_DEFAULT = 'false'
    # Should be render_GET but ONC (i.e. ExtJS) cannot attach a request body to GET requests
    def render(self, request):
        log.info('Incoming authentication request from %s' % request.getClientIP())
        authentication_utility = getUtility(IHttpRestAuthenticationUtility)
        # enable basic auth only if explicitly requested
        basic_auth = request.args.get('basic_auth', [self.BASIC_AUTH_DEFAULT])[0] != 'false'
        body = request.content.getvalue()
        # Credentials may arrive as query parameters, as a JSON body, or as
        # an HTTP Basic Authorization header -- checked in that order.
        if request.args.get('username') and request.args.get('password'):
            credentials = UsernamePassword(request.args.get('username')[0],
                                           request.args.get('password')[0])
        elif body:
            try:
                params = json.loads(body)
            except ValueError:
                raise BadRequest("The request body not JSON-parsable")
            # cannot be unicode
            username = str(params['username'])
            password = str(params['password'])
            credentials = UsernamePassword(username, password)
        else:
            credentials = authentication_utility.get_basic_auth_credentials(request)
        # if already authenticated, return success even if the request didn't provide auth credentials
        if not credentials and request.interaction.checkPermission('rest', object):
            return {'status': 'success'}
        # XXX: refactor HttpRestServer.handle_request so that it's not a db.transact
        # so that we can use a defer.inlineCallback here
        return blocking_yield(authentication_utility.authenticate(request, credentials, basic_auth))
class LogoutView(HttpRestView):
    """Logout endpoint: clears the authentication cookie."""
    context(OmsRoot)
    name('logout')
    realm = 'OMS'

    def render_GET(self, request):
        """Expire the ``oms_auth_token`` cookie and acknowledge the logout."""
        # An empty value combined with a date in the past makes the client
        # drop the cookie.
        request.addCookie('oms_auth_token', '',
                          expires='Wed, 01 Jan 2000 00:00:00 GMT')
        return {'status': 'success'}
class BasicAuthView(AuthView):
    """AuthView variant that defaults ``basic_auth`` to true, so failed
    logins raise Unauthorized instead of Forbidden."""
    context(OmsRoot)
    name('basicauth')
    require('oms.nothing')
    BASIC_AUTH_DEFAULT = 'true'
class BasicAuthLogoutView(LogoutView):
    """LogoutView variant that clears the cookie and then raises Unauthorized."""
    context(OmsRoot)
    name('basicauthlogout')
    require('oms.nothing')
    def render_GET(self, request):
        # Clear the auth cookie via the parent implementation first.
        super(BasicAuthLogoutView, self).render_GET(request)
        # Then answer with Unauthorized (presumably to make clients discard
        # cached Basic credentials -- TODO confirm).
        raise Unauthorized()
| gpl-3.0 |
privateip/ansible | lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py | 45 | 15206 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: netapp_e_snapshot_group
short_description: Manage snapshot groups
description:
- Create, update, delete snapshot groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
state:
description:
- Whether to ensure the group is present or absent.
required: True
choices:
- present
- absent
name:
description:
- The name to give the snapshot group
required: True
base_volume_name:
description:
- The name of the base volume or thin volume to use as the base for the new snapshot group.
- If a snapshot group with an identical C(name) already exists but with a different base volume
an error will be returned.
required: True
repo_pct:
description:
- The size of the repository in relation to the size of the base volume
required: False
default: 20
warning_threshold:
description:
- The repository utilization warning threshold, as a percentage of the repository volume capacity.
required: False
default: 80
delete_limit:
description:
- The automatic deletion indicator.
- If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
- This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
required: False
default: 30
full_policy:
description:
- The behavior on when the data repository becomes full.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
required: False
default: purgepit
choices:
- purgepit
- unknown
- failbasewrites
- __UNDEFINED
storage_pool_name:
required: True
description:
- The name of the storage pool on which to allocate the repository volume.
rollback_priority:
required: False
description:
- The importance of the rollback operation.
- This value is overridden by consistency group setting if this snapshot group is associated with a consistency group
choices:
- highest
- high
- medium
- low
- lowest
- __UNDEFINED
default: medium
"""
# Example playbook snippet; the original contained a YAML typo
# (``name=: OOSS_Group``) which is fixed here.
EXAMPLES = """
    - name: Configure Snapshot group
      netapp_e_snapshot_group:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        base_volume_name: SSGroup_test
        name: OOSS_Group
        repo_pct: 20
        warning_threshold: 85
        delete_limit: 30
        full_policy: purgepit
        storage_pool_name: Disk_Pool_1
        rollback_priority: medium
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: json facts for newly created snapshot group.
"""
# Default HTTP headers sent with every SANtricity REST API call.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP(S) request and return ``(status_code, parsed_body)``.

    The response body is JSON-decoded when non-empty.  Responses with a
    status >= 400 raise ``Exception(status, body)`` unless ``ignore_errors``
    is set, in which case the status and body are returned normally.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # open_url raises for >= 400 responses, but the error object still
        # exposes the response body; recover it and continue below.
        err = get_exception()
        r = err.fp
    # Initialize so the error path below cannot hit a NameError when
    # ``r.read()`` itself fails.
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            # NOTE: re-binds ``data`` (the request payload) to the decoded
            # response body; kept as-is for backwards compatibility.
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # propagate; this covers read() failures and invalid JSON.
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)
    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
class SnapshotGroup(object):
    """Create, update or delete an E-Series snapshot group via the
    SANtricity WebServices REST API, driven by Ansible module parameters."""

    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(required=True, choices=['present', 'absent']),
            base_volume_name=dict(required=True),
            name=dict(required=True),
            repo_pct=dict(default=20, type='int'),
            warning_threshold=dict(default=80, type='int'),
            delete_limit=dict(default=30, type='int'),
            full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
            rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
            storage_pool_name=dict(type='str'),
            ssid=dict(required=True),
        )
        self.module = AnsibleModule(argument_spec=argument_spec)
        self.post_data = dict()
        self.warning_threshold = self.module.params['warning_threshold']
        self.base_volume_name = self.module.params['base_volume_name']
        self.name = self.module.params['name']
        self.repo_pct = self.module.params['repo_pct']
        self.delete_limit = self.module.params['delete_limit']
        self.full_policy = self.module.params['full_policy']
        self.rollback_priority = self.module.params['rollback_priority']
        self.storage_pool_name = self.module.params['storage_pool_name']
        self.state = self.module.params['state']
        self.url = self.module.params['api_url']
        self.user = self.module.params['api_username']
        self.pwd = self.module.params['api_password']
        self.certs = self.module.params['validate_certs']
        self.ssid = self.module.params['ssid']
        # All endpoint paths below are joined with a trailing-slash base URL.
        if not self.url.endswith('/'):
            self.url += '/'
        self.changed = False

    @property
    def pool_id(self):
        """Id of the storage pool named ``storage_pool_name``.

        Fails the module when the pool cannot be fetched or is absent.
        Caches the matching pool record in ``self.pool_data``.
        """
        pools = 'storage-systems/%s/storage-pools' % self.ssid
        url = self.url + pools
        try:
            (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))
        for pool in data:
            if pool['name'] == self.storage_pool_name:
                self.pool_data = pool
                return pool['id']
        # BUGFIX: the original reported self.name (the snapshot group name)
        # here instead of the storage pool name that was actually searched.
        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)

    @property
    def volume_id(self):
        """Id of the base volume; fails the module if absent or ambiguous.

        Caches the matching volume record in ``self.volume``.
        """
        volumes = 'storage-systems/%s/volumes' % self.ssid
        url = self.url + volumes
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))
        # BUGFIX: the original relied on catching a NameError to detect "no
        # match"; use an explicit sentinel instead.
        volume_id = None
        qty = 0
        for volume in data:
            if volume['name'] == self.base_volume_name:
                qty += 1
                if qty > 1:
                    self.module.fail_json(msg="More than one volume with the name: %s was found, "
                                              "please ensure your volume has a unique name" % self.base_volume_name)
                volume_id = volume['id']
                self.volume = volume
        if volume_id is None:
            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)
        return volume_id

    @property
    def snapshot_group_id(self):
        """Id of the snapshot group named ``self.name``, or None if absent.

        Caches the matching group record in ``self.ssg_data``.
        """
        url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Failed to fetch snapshot groups. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))
        for ssg in data:
            if ssg['name'] == self.name:
                self.ssg_data = ssg
                return ssg['id']
        return None

    @property
    def ssg_needs_update(self):
        """True when any mutable setting differs from the requested values."""
        if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
                self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
                self.ssg_data['repFullPolicy'] != self.full_policy or \
                self.ssg_data['rollbackPriority'] != self.rollback_priority:
            return True
        else:
            return False

    def create_snapshot_group(self):
        """Create the group, then apply any settings (e.g. rollback priority)
        that the create call does not accept; exits the module."""
        self.post_data = dict(
            baseMappableObjectId=self.volume_id,
            name=self.name,
            repositoryPercentage=self.repo_pct,
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            storagePoolId=self.pool_id,
        )
        snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
        url = self.url + snapshot
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Failed to create snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     str(err)))
        # BUGFIX: ``snapshot_group_id`` is a read-only property (it re-queries
        # the array), so the original fallback assignment
        # ``self.snapshot_group_id = self.ssg_data['id']`` would have raised
        # AttributeError; the new group's id is already in ``self.ssg_data``.
        if self.ssg_needs_update:
            self.update_ssg()
        # BUGFIX: the original only exited in the no-update branch, leaving
        # the module without a result after create + update.
        self.module.exit_json(changed=True, **self.ssg_data)

    def update_ssg(self):
        """POST the mutable settings to an existing snapshot group."""
        self.post_data = dict(
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            rollbackPriority=self.rollback_priority
        )
        url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Failed to update snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     str(err)))

    def apply(self):
        """Reconcile the requested state with the array and exit the module."""
        if self.state == 'absent':
            if self.snapshot_group_id:
                try:
                    rc, resp = request(
                        self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
                        method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
                        validate_certs=self.certs)
                except Exception:
                    err = get_exception()
                    self.module.fail_json(msg="Failed to delete snapshot group. " +
                                              "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                             self.ssid,
                                                                                             str(err)))
                self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
            else:
                self.module.exit_json(changed=False, msg="Snapshot group absent")
        elif self.snapshot_group_id:
            # Group exists: update it only when a setting actually differs.
            if self.ssg_needs_update:
                self.update_ssg()
                self.module.exit_json(changed=True, **self.ssg_data)
            else:
                self.module.exit_json(changed=False, **self.ssg_data)
        else:
            self.create_snapshot_group()
def main():
    """Ansible entry point: build the snapshot-group manager and apply state."""
    SnapshotGroup().apply()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Lukas-Stuehrk/Babel3 | babel/messages/tests/mofile.py | 2 | 3421 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2011 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import doctest
import gettext
import os
import unittest
from babel.compat import BytesIO, u, text_type
from babel.messages import mofile, Catalog
class ReadMoTestCase(unittest.TestCase):
    """Tests for reading GNU MO files via ``babel.messages.mofile``."""

    def setUp(self):
        # Test fixtures live next to this module in ``data/``.
        self.datadir = os.path.join(os.path.dirname(__file__), 'data')

    def test_basics(self):
        """A compiled catalog round-trips its metadata and messages."""
        path = os.path.join(self.datadir, 'project', 'i18n', 'de',
                            'LC_MESSAGES', 'messages.mo')
        # ``with`` guarantees the file is closed even when an assertion
        # fails, replacing the explicit try/finally of the original.
        with open(path, 'rb') as mo_file:
            catalog = mofile.read_mo(mo_file)
            self.assertEqual(2, len(catalog))
            self.assertEqual('TestProject', catalog.project)
            self.assertEqual('0.1', catalog.version)
            self.assertEqual('Stange', catalog['bar'].string)
            self.assertEqual(['Fuhstange', 'Fuhstangen'],
                             catalog['foobar'].string)
class WriteMoTestCase(unittest.TestCase):
    """Tests for writing GNU MO files via ``babel.messages.mofile``."""
    def test_sorting(self):
        # Ensure the header is sorted to the first entry so that its charset
        # can be applied to all subsequent messages by GNUTranslations
        # (ensuring all messages are safely converted to unicode)
        catalog = Catalog(locale='en_US')
        catalog.add(u(''), '''\
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n''')
        catalog.add(u('foo'), 'Voh')
        catalog.add((u('There is'), u('There are')), (u('Es gibt'), u('Es gibt')))
        # Messages with empty translations fall back to their msgid.
        catalog.add(u('Fizz'), '')
        catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
        buf = BytesIO()
        mofile.write_mo(buf, catalog)
        buf.seek(0)
        # Read the written MO back through the stdlib to verify it is valid.
        translations = gettext.GNUTranslations(fp=buf)
        self.assertEqual(u('Voh'), translations.ugettext('foo'))
        assert isinstance(translations.ugettext('foo'), text_type)
        self.assertEqual(u('Es gibt'), translations.ungettext('There is', 'There are', 1))
        assert isinstance(translations.ungettext('There is', 'There are', 1), text_type)
        self.assertEqual(u('Fizz'), translations.ugettext('Fizz'))
        assert isinstance(translations.ugettext('Fizz'), text_type)
        self.assertEqual(u('Fuzz'), translations.ugettext('Fuzz'))
        assert isinstance(translations.ugettext('Fuzz'), text_type)
        self.assertEqual(u('Fuzzes'), translations.ugettext('Fuzzes'))
        assert isinstance(translations.ugettext('Fuzzes'), text_type)
    def test_more_plural_forms(self):
        # Writing a catalog with three plural forms must not raise.
        catalog2 = Catalog(locale='ru_RU')
        catalog2.add(('Fuzz', 'Fuzzes'), ('', '', ''))
        buf = BytesIO()
        mofile.write_mo(buf, catalog2)
def suite():
    """Build the test suite for this module, including its doctests."""
    # BUG FIX: ``doctest`` is used below but never imported at module level
    # in this file, so calling suite() raised NameError.  Import it locally
    # to keep the fix self-contained.
    import doctest
    suite = unittest.TestSuite()
    suite.addTest(doctest.DocTestSuite(mofile, optionflags=doctest.ELLIPSIS))
    suite.addTest(unittest.makeSuite(ReadMoTestCase))
    suite.addTest(unittest.makeSuite(WriteMoTestCase))
    return suite
# Script entry point: run the aggregated ``suite`` above.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| bsd-3-clause |
sameera2004/chxtools | chxtools/plot.py | 3 | 5392 | import numpy as np
import subprocess
from dataportal import DataBroker, DataMuxer
from dataportal.broker import EventQueue
import matplotlib.pyplot as plt
import time as ttime
import sys
from ophyd.userapi.scan_api import estimate
def new_queue(header, queue=None):
    """Return an up-to-date ``(header, queue)`` pair.

    Creates an EventQueue for ``header`` when none exists yet.  Otherwise,
    checks the broker for the most recent header and, when a newer scan has
    started, returns a fresh header/queue pair for it; if the scan is
    unchanged the inputs are returned untouched.
    """
    if queue is None:
        return header, EventQueue(header)
    hdr = DataBroker[-1]
    if header.scan_id == hdr.scan_id:
        # Still on the same scan -- nothing to swap out.
        return header, queue
    print("New header found: Scan id = %s. uid = %s" %
          (hdr.scan_id, hdr.run_start_uid))
    sys.stdout.flush()
    return hdr, EventQueue(hdr)
# Styling tables for the peak-estimator overlay in plot1d: the key is the
# statistic name produced by ``estimate`` and the value is the matplotlib
# kwargs used to draw it.
vlines = {'center_of_mass': {'color': 'red'},
          'cen': {'color': 'red', 'ls': '--'},}
hlines = {'avgy': {'color': 'blue', 'ls': '-'},
          'ymin': {'color': 'black', 'ls': '--'},
          'ymax': {'color': 'black', 'ls': '--'}, }
# BUG FIX: 'fwmh_left' was a typo of 'fwhm_left' (compare 'fwhm_right'
# below); the misspelled key could never match a statistic name, so the
# left FWHM marker was silently never drawn.
points = {'cen': {'color': 'red', 'marker': 'o'},
          'fwhm_left': {'color': 'red', 'marker': '<'},
          'fwhm_right': {'color': 'red', 'marker': '>'}}
def plot1d(y, x=None, scans=None, live=True, sleep_time=1):
    """Plot live data and on-the-fly peak stats estimator

    Parameters
    ----------
    y : str
        The name of the y value to plot
    x : str, optional
        The name of the value to plot on the x axis. If None, defaults
        to the sequence number of the event (Note that this probably works,
        but I'm not sure as it has not been tested!)
    scans : list, optional
        List of other scan indices to plot. uses db[] syntax, so any valid
        entry to [] will work
    live : bool, optional
        Grab new data and plot it as it comes off. Defaults to True.
    sleep_time : float, optional
        Time to sleep between data updates. Defaults to 1 sec
    """
    if scans is None:
        scans = []
    # map of scan_id -> Line2D for the raw-data axis
    lines1 = {}
    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(15,10), sharex=True)
    fig.show()
    # plot any previously-completed scans that were requested
    for scan_id in scans:
        hdr = DataBroker[scan_id]
        events = DataBroker.fetch_events(hdr)
        dm = DataMuxer.from_events(events)
        df = dm.to_sparse_dataframe()
        if x is None:
            old_x = np.asarray(df.index)
        else:
            old_x = np.asarray(df[x])
        old_y = np.asarray(df[y])
        lines1[scan_id], = ax1.plot(old_x, old_y, 'o', ms=15, label=scan_id)
    if x is None:
        ax1.set_xlabel('scan point index')
        ax2.set_xlabel('scan point index')
    else:
        ax1.set_xlabel(x)
        ax2.set_xlabel(x)
    ax1.set_ylabel(y)
    ax2.set_ylabel(y)
    ax1.set_title('data stream')
    ax2.set_title('peak estimator')
    if live:
        # wait until a scan that is not already plotted shows up
        hdr = DataBroker[-1]
        scan_id = hdr.scan_id
        while scan_id in lines1:
            ttime.sleep(.5)
            hdr = DataBroker[-1]
            scan_id = hdr.scan_id
        lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
        queue = None
        prev_stats = None
        while True:
            # loop until killed
            hdr, queue = new_queue(hdr, queue)
            scan_id = hdr.scan_id
            queue.update()
            new_events = queue.get()
            try:
                old_x, old_y = lines1[scan_id].get_data()
                old_x = list(old_x)
                old_y = list(old_y)
            except KeyError:
                # a new scan started mid-loop; start a fresh line for it
                lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
                old_x, old_y = [], []
            if x is None:
                # BUG FIX: this comprehension previously referenced the
                # undefined name ``event`` (NameError at runtime); the loop
                # variable is ``ev``.
                new_x = [ev.seq_num for ev in new_events]
            else:
                new_x = [ev['data'][x] for ev in new_events]
            new_y = [ev['data'][y] for ev in new_events]
            new_x = old_x + new_x
            new_y = old_y + new_y
            lines1[scan_id].set_data(new_x, new_y)
            ax1.relim(visible_only=True)
            ax1.legend(loc=0).draggable()
            # now deal with axis 2
            try:
                stats = estimate(np.asarray(new_x), np.asarray(new_y))
            except ValueError:
                # not enough data yet for the estimator; keep the old stats
                stats = prev_stats
            # print(stats)
            if stats != prev_stats:
                # redraw the estimator axis from scratch with the new stats
                ax2.cla()
                ax2.plot(new_x, new_y, 'o', ms=15, label=scan_id)
                ax2.set_title('peak estimator')
                for stat, vals in stats.items():
                    if stat in points:
                        # sometimes 'cen' comes back as one or two values. This
                        # try/except block is a way to do the right thing when
                        # this happens
                        # NOTE(review): a scalar here would raise TypeError,
                        # not IndexError -- assumed always indexable; confirm
                        # against ``estimate``'s return contract.
                        try:
                            vals[0]
                            ax2.scatter(vals[0], vals[1], label=stat, **points[stat])
                        except IndexError:
                            ax2.axvline(vals, label=stat, **vlines[stat])
                    elif stat in hlines:
                        # draw a horizontal line
                        ax2.axhline(vals, label=stat, **hlines[stat])
                    elif stat in vlines:
                        # draw a vertical line
                        ax2.axvline(vals, label=stat, **vlines[stat])
                prev_stats = stats
                ax2.relim(visible_only=True)
                ax2.legend(loc=0).draggable()
            fig.canvas.draw()
            fig.canvas.flush_events()
            ttime.sleep(sleep_time)
| bsd-3-clause |
marlengit/hardfork_prototype_1_mvf-bu | qa/rpc-tests/bip68-sequence.py | 2 | 18338 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
# Satoshis per bitcoin.
COIN = 100000000
# nSequence bit 31: when set, relative lock-time (BIP68) is disabled for
# the input.
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
# Low 16 bits of nSequence hold the lock-time value itself.
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
    """Functional test of BIP68 (relative lock-time via nSequence).

    Exercises the disable flag, sequence locks on confirmed and unconfirmed
    inputs, mempool consistency across reorgs, non-consensus behavior before
    versionbits activation, and nVersion=2 relay standardness.
    """
    def setup_network(self):
        """Start two connected nodes; node 1 rejects non-standard txs."""
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
        self.is_network_split = False
        self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
        connect_nodes(self.nodes[0], 1)
    def run_test(self):
        """Run all sub-tests in order: pre-activation first, then activate CSV."""
        # Generate some coins
        self.nodes[0].generate(110)
        print "Running test disable flag"
        self.test_disable_flag()
        print "Running test sequence-lock-confirmed-inputs"
        self.test_sequence_lock_confirmed_inputs()
        print "Running test sequence-lock-unconfirmed-inputs"
        self.test_sequence_lock_unconfirmed_inputs()
        print "Running test BIP68 not consensus before versionbits activation"
        self.test_bip68_not_consensus()
        print "Verifying nVersion=2 transactions aren't standard"
        self.test_version2_relay(before_activation=True)
        print "Activating BIP68 (and 112/113)"
        self.activateCSV()
        print "Verifying nVersion=2 transactions are now standard"
        self.test_version2_relay(before_activation=False)
        print "Passed\n"
    # Test that BIP68 is not in effect if tx version is 1, or if
    # the first sequence bit is set.
    def test_disable_flag(self):
        """BIP68 must be inert for version-1 txs and when bit 31 is set."""
        # Create some unconfirmed inputs
        new_addr = self.nodes[0].getnewaddress()
        self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
        utxos = self.nodes[0].listunspent(0, 0)
        assert(len(utxos) > 0)
        utxo = utxos[0]
        tx1 = CTransaction()
        value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
        # Check that the disable flag disables relative locktime.
        # If sequence locks were used, this would require 1 block for the
        # input to mature.
        sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
        tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
        tx1.vout = [CTxOut(value, CScript([b'a']))]
        tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
        tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
        tx1_id = int(tx1_id, 16)
        # This transaction will enable sequence-locks, so this transaction should
        # fail
        tx2 = CTransaction()
        tx2.nVersion = 2
        sequence_value = sequence_value & 0x7fffffff
        tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
        tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
        tx2.rehash()
        try:
            self.nodes[0].sendrawtransaction(ToHex(tx2))
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], NOT_FINAL_ERROR)
        else:
            assert(False)
        # Setting the version back down to 1 should disable the sequence lock,
        # so this should be accepted.
        tx2.nVersion = 1
        self.nodes[0].sendrawtransaction(ToHex(tx2))
    # Calculate the median time past of a prior block ("confirmations" before
    # the current tip).
    def get_median_time_past(self, confirmations):
        """Return the MTP of the block ``confirmations`` blocks before the tip."""
        block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
        return self.nodes[0].getblockheader(block_hash)["mediantime"]
    # Test that sequence locks are respected for transactions spending confirmed inputs.
    def test_sequence_lock_confirmed_inputs(self):
        """Fuzz random txs with height/time sequence locks on confirmed inputs."""
        # Create lots of confirmed utxos, and use them to generate lots of random
        # transactions.
        max_outputs = 50
        addresses = []
        while len(addresses) < max_outputs:
            addresses.append(self.nodes[0].getnewaddress())
        while len(self.nodes[0].listunspent()) < 200:
            # NOTE(review): importing random inside the loop makes it a
            # function-local name; if this loop body never runs (enough utxos
            # already exist), the uses of ``random`` below would raise
            # UnboundLocalError.  Consider a module-level import.
            import random
            random.shuffle(addresses)
            num_outputs = random.randint(1, max_outputs)
            outputs = {}
            for i in xrange(num_outputs):
                outputs[addresses[i]] = random.randint(1, 20)*0.01
            self.nodes[0].sendmany("", outputs)
            self.nodes[0].generate(1)
        utxos = self.nodes[0].listunspent()
        # Try creating a lot of random transactions.
        # Each time, choose a random number of inputs, and randomly set
        # some of those inputs to be sequence locked (and randomly choose
        # between height/time locking). Small random chance of making the locks
        # all pass.
        for i in xrange(400):
            # Randomly choose up to 10 inputs
            num_inputs = random.randint(1, 10)
            random.shuffle(utxos)
            # Track whether any sequence locks used should fail
            should_pass = True
            # Track whether this transaction was built with sequence locks
            using_sequence_locks = False
            tx = CTransaction()
            tx.nVersion = 2
            value = 0
            for j in xrange(num_inputs):
                sequence_value = 0xfffffffe # this disables sequence locks
                # 50% chance we enable sequence locks
                if random.randint(0,1):
                    using_sequence_locks = True
                    # 10% of the time, make the input sequence value pass
                    input_will_pass = (random.randint(1,10) == 1)
                    sequence_value = utxos[j]["confirmations"]
                    if not input_will_pass:
                        sequence_value += 1
                        should_pass = False
                    # Figure out what the median-time-past was for the confirmed input
                    # Note that if an input has N confirmations, we're going back N blocks
                    # from the tip so that we're looking up MTP of the block
                    # PRIOR to the one the input appears in, as per the BIP68 spec.
                    orig_time = self.get_median_time_past(utxos[j]["confirmations"])
                    cur_time = self.get_median_time_past(0) # MTP of the tip
                    # can only timelock this input if it's not too old -- otherwise use height
                    can_time_lock = True
                    if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
                        can_time_lock = False
                    # if time-lockable, then 50% chance we make this a time lock
                    if random.randint(0,1) and can_time_lock:
                        # Find first time-lock value that fails, or latest one that succeeds
                        time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
                        if input_will_pass and time_delta > cur_time - orig_time:
                            sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
                        elif (not input_will_pass and time_delta <= cur_time - orig_time):
                            sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
                        sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
                tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
                value += utxos[j]["amount"]*COIN
            # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
            tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
            tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
            rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
            try:
                self.nodes[0].sendrawtransaction(rawtx)
            except JSONRPCException as exp:
                assert(not should_pass and using_sequence_locks)
                assert_equal(exp.error["message"], NOT_FINAL_ERROR)
            else:
                assert(should_pass or not using_sequence_locks)
                # Recalculate utxos if we successfully sent the transaction
                utxos = self.nodes[0].listunspent()
    # Test that sequence locks on unconfirmed inputs must have nSequence
    # height or time of 0 to be accepted.
    # Then test that BIP68-invalid transactions are removed from the mempool
    # after a reorg.
    def test_sequence_lock_unconfirmed_inputs(self):
        """Unconfirmed inputs need zero lock; check reorg mempool consistency."""
        # Store height so we can easily reset the chain at the end of the test
        cur_height = self.nodes[0].getblockcount()
        # Create a mempool tx.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
        tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()
        # Anyone-can-spend mempool tx.
        # Sequence lock of 0 should pass.
        tx2 = CTransaction()
        tx2.nVersion = 2
        tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
        tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
        tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
        tx2 = FromHex(tx2, tx2_raw)
        tx2.rehash()
        self.nodes[0].sendrawtransaction(tx2_raw)
        # Create a spend of the 0th output of orig_tx with a sequence lock
        # of 1, and test what happens when submitting.
        # orig_tx.vout[0] must be an anyone-can-spend output
        def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
            """Spend orig_tx with lock 1 (height or time); assert mempool state."""
            sequence_value = 1
            if not use_height_lock:
                sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
            tx = CTransaction()
            tx.nVersion = 2
            tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
            tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
            tx.rehash()
            try:
                node.sendrawtransaction(ToHex(tx))
            except JSONRPCException as exp:
                assert_equal(exp.error["message"], NOT_FINAL_ERROR)
                assert(orig_tx.hash in node.getrawmempool())
            else:
                # orig_tx must not be in mempool
                assert(orig_tx.hash not in node.getrawmempool())
            return tx
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        # Now mine some blocks, but make sure tx2 doesn't get mined.
        # Use prioritisetransaction to lower the effective feerate to 0
        self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
        cur_time = int(time.time())
        for i in xrange(10):
            self.nodes[0].setmocktime(cur_time + 600)
            self.nodes[0].generate(1)
            cur_time += 600
        assert(tx2.hash in self.nodes[0].getrawmempool())
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
        test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        # Mine tx2, and then try again
        self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
        # Advance the time on the node so that we can test timelocks
        self.nodes[0].setmocktime(cur_time+600)
        self.nodes[0].generate(1)
        assert(tx2.hash not in self.nodes[0].getrawmempool())
        # Now that tx2 is not in the mempool, a sequence locked spend should
        # succeed
        tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
        assert(tx3.hash in self.nodes[0].getrawmempool())
        self.nodes[0].generate(1)
        assert(tx3.hash not in self.nodes[0].getrawmempool())
        # One more test, this time using height locks
        tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
        assert(tx4.hash in self.nodes[0].getrawmempool())
        # Now try combining confirmed and unconfirmed inputs
        tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
        assert(tx5.hash not in self.nodes[0].getrawmempool())
        utxos = self.nodes[0].listunspent()
        tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
        tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
        raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
        try:
            self.nodes[0].sendrawtransaction(raw_tx5)
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], NOT_FINAL_ERROR)
        else:
            assert(False)
        # Test mempool-BIP68 consistency after reorg
        #
        # State of the transactions in the last blocks:
        # ... -> [ tx2 ] -> [ tx3 ]
        # tip-1 tip
        # And currently tx4 is in the mempool.
        #
        # If we invalidate the tip, tx3 should get added to the mempool, causing
        # tx4 to be removed (fails sequence-lock).
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        assert(tx4.hash not in self.nodes[0].getrawmempool())
        assert(tx3.hash in self.nodes[0].getrawmempool())
        # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
        # diagram above).
        # This would cause tx2 to be added back to the mempool, which in turn causes
        # tx3 to be removed.
        tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
        height = self.nodes[0].getblockcount()
        for i in xrange(2):
            block = create_block(tip, create_coinbase(height), cur_time)
            block.nVersion = 3
            block.rehash()
            block.solve()
            tip = block.sha256
            height += 1
            self.nodes[0].submitblock(ToHex(block))
            cur_time += 1
        mempool = self.nodes[0].getrawmempool()
        assert(tx3.hash not in mempool)
        assert(tx2.hash in mempool)
        # Reset the chain and get rid of the mocktimed-blocks
        self.nodes[0].setmocktime(0)
        self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
        self.nodes[0].generate(10)
    # Make sure that BIP68 isn't being used to validate blocks, prior to
    # versionbits activation. If more blocks are mined prior to this test
    # being run, then it's possible the test has activated the soft fork, and
    # this test should be moved to run earlier, or deleted.
    def test_bip68_not_consensus(self):
        """Pre-activation, a BIP68-violating block must still be accepted."""
        assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
        tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
        tx1.rehash()
        # Make an anyone-can-spend transaction
        tx2 = CTransaction()
        tx2.nVersion = 1
        tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
        tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
        # sign tx2
        tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
        tx2 = FromHex(tx2, tx2_raw)
        tx2.rehash()
        self.nodes[0].sendrawtransaction(ToHex(tx2))
        # Now make an invalid spend of tx2 according to BIP68
        sequence_value = 100 # 100 block relative locktime
        tx3 = CTransaction()
        tx3.nVersion = 2
        tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
        tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
        tx3.rehash()
        try:
            self.nodes[0].sendrawtransaction(ToHex(tx3))
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], NOT_FINAL_ERROR)
        else:
            assert(False)
        # make a block that violates bip68; ensure that the tip updates
        tip = int(self.nodes[0].getbestblockhash(), 16)
        block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
        block.nVersion = 3
        block.vtx.extend([tx1, tx2, tx3])
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        self.nodes[0].submitblock(ToHex(block))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
    def activateCSV(self):
        """Mine up to height 432 so the BIP9 'csv' deployment activates."""
        # activation should happen at block height 432 (3 periods)
        # NOTE(review): min_activation_height is unused; the literal 432 is
        # repeated below instead.
        min_activation_height = 432
        height = self.nodes[0].getblockcount()
        assert(height < 432)
        self.nodes[0].generate(432-height)
        assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
        sync_blocks(self.nodes)
    # Use self.nodes[1] to test standardness relay policy
    def test_version2_relay(self, before_activation):
        """nVersion=2 txs are non-standard before activation, standard after."""
        inputs = [ ]
        outputs = { self.nodes[1].getnewaddress() : 1.0 }
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
        tx = FromHex(CTransaction(), rawtxfund)
        tx.nVersion = 2
        tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
        try:
            tx_id = self.nodes[1].sendrawtransaction(tx_signed)
            assert(before_activation == False)
        except:
            assert(before_activation)
# Script entry point: run the full BIP68 functional test.
if __name__ == '__main__':
    BIP68Test().main()
| mit |
jasonbot/django | tests/template_tests/filter_tests/test_length.py | 521 | 1900 | from django.template.defaultfilters import length
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LengthTests(SimpleTestCase):
    """Render ``{{ ...|length }}`` through the template engine."""
    @setup({'length01': '{{ list|length }}'})
    def test_length01(self):
        rendered = self.engine.render_to_string('length01', {'list': ['4', None, True, {}]})
        self.assertEqual(rendered, '4')
    @setup({'length02': '{{ list|length }}'})
    def test_length02(self):
        rendered = self.engine.render_to_string('length02', {'list': []})
        self.assertEqual(rendered, '0')
    @setup({'length03': '{{ string|length }}'})
    def test_length03(self):
        rendered = self.engine.render_to_string('length03', {'string': ''})
        self.assertEqual(rendered, '0')
    @setup({'length04': '{{ string|length }}'})
    def test_length04(self):
        rendered = self.engine.render_to_string('length04', {'string': 'django'})
        self.assertEqual(rendered, '6')
    @setup({'length05': '{% if string|length == 6 %}Pass{% endif %}'})
    def test_length05(self):
        rendered = self.engine.render_to_string('length05', {'string': mark_safe('django')})
        self.assertEqual(rendered, 'Pass')
    # Invalid uses (objects without a length) must fail silently and
    # render as '0'.
    @setup({'length06': '{{ int|length }}'})
    def test_length06(self):
        rendered = self.engine.render_to_string('length06', {'int': 7})
        self.assertEqual(rendered, '0')
    @setup({'length07': '{{ None|length }}'})
    def test_length07(self):
        rendered = self.engine.render_to_string('length07', {'None': None})
        self.assertEqual(rendered, '0')
class FunctionTests(SimpleTestCase):
    """Call the ``length`` filter function directly."""
    def test_string(self):
        self.assertEqual(4, length('1234'))
    def test_safestring(self):
        safe = mark_safe('1234')
        self.assertEqual(4, length(safe))
    def test_list(self):
        items = [1, 2, 3, 4]
        self.assertEqual(4, length(items))
| bsd-3-clause |
ooici/marine-integrations | mi/dataset/parser/phsen.py | 1 | 28094 | #!/usr/bin/env python
"""
@package mi.dataset.parser.phsen
@file marine-integrations/mi/dataset/parser/phsen.py
@author Emily Hahn
@brief Parser for the mflm_phsen dataset driver
Release notes:
initial release
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import re
import ntplib
import time
from datetime import datetime
from dateutil import parser
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, DataParticleValue
from mi.core.exceptions import SampleException, DatasetParserException, RecoverableSampleException
from mi.dataset.parser.sio_mule_common import SioMuleParser, SIO_HEADER_MATCHER
# match the ascii hex ph records
# the data should be ascii hex, but may have non hex ascii characters, if this happens the
# value will be set to none
DATA_REGEX = b'(\^0A\r\*)([0-9A-Fa-f]{4}0A)([\x00-\xFF]{8})([\x00-\xFF]{446}[0-9A-Fa-f]{4})\r'
DATA_MATCHER = re.compile(DATA_REGEX)
# match the ascii hex control record, there is an optional 2 byte field at the end
# this also allows for non hex ascii characters in the timestamp, flags and number of records
CONTROL_REGEX = b'(\*)([0-9A-Fa-f]{4}[8-9A-Fa-f][0-9A-Fa-f])([\x00-\xFF]{32}[0-9A-Fa-f]{0,4})\r'
CONTROL_MATCHER = re.compile(CONTROL_REGEX)
# control messages are hex 80 or greater, so the first ascii char must be greater than 8 hex
CONTROL_ID_REGEX = b'[8-9A-Fa-f][0-9A-Fa-f]'
CONTROL_ID_MATCHER = re.compile(CONTROL_ID_REGEX)
TIMESTAMP_REGEX = b'[0-9A-Fa-f]{8}'
TIMESTAMP_MATCHER = re.compile(TIMESTAMP_REGEX)
HEX_INT_REGEX = b'[0-9A-Fa-f]{4}'
HEX_INT_MATCHER = re.compile(HEX_INT_REGEX)
# this occurs frequently at the end of ph messages, don't send an exception for this case
PH_EXTRA_END = b'?03\r'
# end of sio block of data marker
SIO_END = b'\x03'
# record type id of a ph sample record
PH_ID = '0A'
# the control message has an optional data or battery field for some control IDs
DATA_CONTROL_IDS = ['BF', 'FF']
# BUG FIX: the first battery control id was 'CO' (letter O) instead of the
# hex value 'C0'; control ids are extracted from a hex-only regex match, so
# 'CO' could never compare equal and battery control records were always
# treated as the fixed-length variant.
# NOTE(review): matched ids may also arrive as lowercase hex ('c0'); the
# membership tests elsewhere assume uppercase -- confirm upstream casing.
BATT_CONTROL_IDS = ['C0', 'C1']
SIO_HEADER_BYTES = 33
NORMAL_CONTROL_LEN = 40
OPTIONAL_CONTROL_LEN = 44
# each measurement is 4 ascii-hex characters
MEASUREMENT_BYTES = 4
class DataParticleType(BaseEnum):
    """Stream names for the two particle types produced by this parser."""
    SAMPLE = 'phsen_abcdef_sio_mule_instrument'
    CONTROL = 'phsen_abcdef_sio_mule_metadata'
class PhsenCommonDataParticleKey(BaseEnum):
    """Particle keys shared by the instrument and control particles."""
    CONTROLLER_TIMESTAMP = 'sio_controller_timestamp'  # hex seconds from the SIO header
    UNIQUE_ID = 'unique_id'
    RECORD_TYPE = 'record_type'
    RECORD_TIME = 'record_time'
    PASSED_CHECKSUM = 'passed_checksum'  # True when the computed checksum matches
class PhsenParserDataParticleKey(PhsenCommonDataParticleKey):
    """Keys specific to the instrument (pH sample) particle."""
    THERMISTOR_START = 'thermistor_start'
    REFERENCE_LIGHT_MEASUREMENTS = 'reference_light_measurements'  # 16 values
    LIGHT_MEASUREMENTS = 'light_measurements'  # 23 sets of 4 values
    VOLTAGE_BATTERY = 'voltage_battery'
    THERMISTOR_END = 'thermistor_end'
class PhsenParserDataParticle(DataParticle):
    """
    Class for parsing instrument (pH sample) records from the mflm_phsen
    data stream.  raw_data is the 8 ascii-hex-char SIO controller timestamp
    followed by one ascii-hex pH data record matching DATA_REGEX.
    """
    _data_particle_type = DataParticleType.SAMPLE
    def __init__(self, raw_data,
                 port_timestamp=None,
                 internal_timestamp=None,
                 preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
                 quality_flag=DataParticleValue.OK,
                 new_sequence=None):
        """
        Validate the raw record and set the internal timestamp.
        @param raw_data SIO timestamp (8 hex chars) + ascii-hex pH record
        @throws RecoverableSampleException if either part fails to match
        """
        # BUG FIX: the superclass was previously always invoked with the
        # default keyword values, silently discarding any port_timestamp,
        # preferred_timestamp, quality_flag, etc. supplied by the caller.
        # Forward the actual arguments instead.
        super(PhsenParserDataParticle, self).__init__(raw_data,
                                                      port_timestamp=port_timestamp,
                                                      internal_timestamp=internal_timestamp,
                                                      preferred_timestamp=preferred_timestamp,
                                                      quality_flag=quality_flag,
                                                      new_sequence=new_sequence)
        # the first 8 characters must be the hex SIO controller timestamp
        timestamp_match = TIMESTAMP_MATCHER.match(self.raw_data[:8])
        if not timestamp_match:
            raise RecoverableSampleException("PhsenParserDataParticle: No regex match of " \
                                             "timestamp [%s]" % self.raw_data[:8])
        self._data_match = DATA_MATCHER.match(self.raw_data[8:])
        if not self._data_match:
            raise RecoverableSampleException("PhsenParserDataParticle: No regex match of " \
                                             "parsed sample data [%s]" % self.raw_data[8:])
        # use the timestamp from the sio header as internal timestamp
        sec_since_1970 = int(self.raw_data[:8], 16)
        self.set_internal_timestamp(unix_time=sec_since_1970)
    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        """
        result = []
        if self._data_match:
            ref_meas = []
            # reference measurements start 4 chars in (after thermistor_start)
            previous_record_bytes = 4
            # 4 sets of 4 reference light measurements (16 total)
            for i in range(0, 16):
                start_idx = previous_record_bytes + i*MEASUREMENT_BYTES
                # confirm this contains only ascii hex chars
                if HEX_INT_MATCHER.match(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES]):
                    try:
                        this_ref = int(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES], 16)
                        ref_meas.append(this_ref)
                    except Exception as e:
                        ref_meas.append(None)
                        self._encoding_errors.append({PhsenParserDataParticleKey.REFERENCE_LIGHT_MEASUREMENTS: \
                                                      "Error encoding %d: %s" % (i, e)})
                else:
                    # don't send an exception if a non ascii hex char is in this value
                    ref_meas.append(None)
            light_meas = []
            n_outer_sets = 23
            n_inner_sets = 4
            # light measurements start after the 16 reference measurements
            previous_record_bytes = 68
            # 23 sets of 4 light measurements
            for i in range(0, n_outer_sets):
                for s in range(0,n_inner_sets):
                    start_idx = previous_record_bytes + i*n_inner_sets*MEASUREMENT_BYTES + s*MEASUREMENT_BYTES
                    # confirm this contains only ascii hex chars
                    if HEX_INT_MATCHER.match(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES]):
                        try:
                            this_meas = int(self._data_match.group(4)[start_idx:start_idx+MEASUREMENT_BYTES], 16)
                            light_meas.append(this_meas)
                        except Exception as e:
                            light_meas.append(None)
                            self._encoding_errors.append({PhsenParserDataParticleKey.LIGHT_MEASUREMENTS: \
                                                          "Error encoding (%d,%d): %s" % (i, s, e)})
                    else:
                        # don't send an exception if a non ascii hex char is in this value
                        light_meas.append(None)
            # calculate the checksum and compare with the received checksum
            # (low byte of the sum of all data bytes)
            passed_checksum = True
            try:
                chksum = int(self._data_match.group(0)[-3:-1], 16)
                sum_bytes = 0
                for i in range(7, 467, 2):
                    sum_bytes += int(self._data_match.group(0)[i:i+2], 16)
                calc_chksum = sum_bytes & 255
                if calc_chksum != chksum:
                    passed_checksum = False
                    log.debug('Calculated internal checksum %d does not match received %d', calc_chksum, chksum)
            except Exception as e:
                log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
                passed_checksum = False
            result = [self._encode_value(PhsenParserDataParticleKey.CONTROLLER_TIMESTAMP, self.raw_data[:8],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.UNIQUE_ID, self._data_match.group(2)[0:2],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.RECORD_TYPE, self._data_match.group(2)[4:6],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.RECORD_TIME, self._data_match.group(3),
                                         PhsenParserDataParticle.encode_timestamp),
                      self._encode_value(PhsenParserDataParticleKey.THERMISTOR_START, self._data_match.group(4)[0:4],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.REFERENCE_LIGHT_MEASUREMENTS,
                                         ref_meas, list),
                      self._encode_value(PhsenParserDataParticleKey.LIGHT_MEASUREMENTS,
                                         light_meas, list),
                      self._encode_value(PhsenParserDataParticleKey.VOLTAGE_BATTERY, self._data_match.group(0)[-11:-7],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.THERMISTOR_END, self._data_match.group(0)[-7:-3],
                                         PhsenParserDataParticle.encode_int_16),
                      self._encode_value(PhsenParserDataParticleKey.PASSED_CHECKSUM, passed_checksum,
                                         bool)]
        return result
    @staticmethod
    def encode_int_16(val_str):
        """
        Encode a hex string into an int
        @param val_str string containing hex value
        """
        return int(val_str, 16)
    @staticmethod
    def encode_timestamp(timestamp_str):
        """
        Encode a hex value into an int if it matches the timestamp
        @param timestamp_str string containing hex timestamp value
        """
        timestamp_match = TIMESTAMP_MATCHER.match(timestamp_str)
        if not timestamp_match:
            return None
        else:
            return int(timestamp_str, 16)
class PhsenControlDataParticleKey(PhsenCommonDataParticleKey):
    """Keys specific to the control (metadata) particle; the first group
    are individual bits decoded from the control record's flag word."""
    CLOCK_ACTIVE = 'clock_active'
    RECORDING_ACTIVE = 'recording_active'
    RECORD_END_ON_TIME = 'record_end_on_time'
    RECORD_MEMORY_FULL = 'record_memory_full'
    RECORD_END_ON_ERROR = 'record_end_on_error'
    DATA_DOWNLOAD_OK = 'data_download_ok'
    FLASH_MEMORY_OPEN = 'flash_memory_open'
    BATTERY_LOW_PRESTART = 'battery_low_prestart'
    BATTERY_LOW_MEASUREMENT = 'battery_low_measurement'
    BATTERY_LOW_BLANK = 'battery_low_blank'
    BATTERY_LOW_EXTERNAL = 'battery_low_external'
    EXTERNAL_DEVICE1_FAULT = 'external_device1_fault'
    EXTERNAL_DEVICE2_FAULT = 'external_device2_fault'
    EXTERNAL_DEVICE3_FAULT = 'external_device3_fault'
    FLASH_ERASED = 'flash_erased'
    POWER_ON_INVALID = 'power_on_invalid'
    # record counters and battery reading from the control record body
    NUM_DATA_RECORDS = 'num_data_records'
    NUM_ERROR_RECORDS = 'num_error_records'
    NUM_BYTES_STORED = 'num_bytes_stored'
    VOLTAGE_BATTERY = 'voltage_battery'
class PhsenControlDataParticle(DataParticle):
    """
    Control-record data particle for the mflm_phsen instrument.

    The raw data is expected to be an 8-character hex-ascii sio header
    timestamp immediately followed by the control record itself.

    @raises RecoverableSampleException if the timestamp or control record
        does not match the expected format, or if the record length is wrong
        for its control id.
    """
    _data_particle_type = DataParticleType.CONTROL

    # Flag-field particle keys, in bit order (bit 0 of the 16-bit flag
    # word first).  Used to encode / null-fill the flag values below.
    FLAG_KEYS = [
        PhsenControlDataParticleKey.CLOCK_ACTIVE,
        PhsenControlDataParticleKey.RECORDING_ACTIVE,
        PhsenControlDataParticleKey.RECORD_END_ON_TIME,
        PhsenControlDataParticleKey.RECORD_MEMORY_FULL,
        PhsenControlDataParticleKey.RECORD_END_ON_ERROR,
        PhsenControlDataParticleKey.DATA_DOWNLOAD_OK,
        PhsenControlDataParticleKey.FLASH_MEMORY_OPEN,
        PhsenControlDataParticleKey.BATTERY_LOW_PRESTART,
        PhsenControlDataParticleKey.BATTERY_LOW_MEASUREMENT,
        PhsenControlDataParticleKey.BATTERY_LOW_BLANK,
        PhsenControlDataParticleKey.BATTERY_LOW_EXTERNAL,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE1_FAULT,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE2_FAULT,
        PhsenControlDataParticleKey.EXTERNAL_DEVICE3_FAULT,
        PhsenControlDataParticleKey.FLASH_ERASED,
        PhsenControlDataParticleKey.POWER_ON_INVALID,
    ]

    def __init__(self, raw_data,
                 port_timestamp=None,
                 internal_timestamp=None,
                 preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,
                 quality_flag=DataParticleValue.OK,
                 new_sequence=None):
        # BUG FIX: forward the caller's arguments to the base class.
        # Previously the defaults were hard-coded in the super() call,
        # silently discarding any values supplied by the caller.
        super(PhsenControlDataParticle, self).__init__(
            raw_data,
            port_timestamp=port_timestamp,
            internal_timestamp=internal_timestamp,
            preferred_timestamp=preferred_timestamp,
            quality_flag=quality_flag,
            new_sequence=new_sequence)

        # The first 8 characters must be a hex-ascii sio header timestamp.
        timestamp_match = TIMESTAMP_MATCHER.match(self.raw_data[:8])
        if not timestamp_match:
            raise RecoverableSampleException("PhsenControlDataParticle: No regex match of "
                                             "timestamp [%s]" % self.raw_data[:8])

        # The remainder must match the control record format.
        self._data_match = CONTROL_MATCHER.match(self.raw_data[8:])
        if not self._data_match:
            raise RecoverableSampleException("PhsenControlDataParticle: No regex match of "
                                             "parsed sample data [%s]" % self.raw_data[8:])

        # use the timestamp from the sio header as internal timestamp
        sec_since_1970 = int(self.raw_data[:8], 16)
        self.set_internal_timestamp(unix_time=sec_since_1970)

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into
        a particle with the appropriate tag.
        @returns list of encoded particle values (empty if no data match)
        @throws RecoverableSampleException if the record length does not
            match the expected length for its control id
        """
        result = []
        if self._data_match:
            record = self._data_match.group(0)
            control_id = self._data_match.group(2)[4:6]

            # Records with optional trailing fields have a different fixed
            # length than normal control records.
            if control_id in DATA_CONTROL_IDS or control_id in BATT_CONTROL_IDS:
                if len(record) != OPTIONAL_CONTROL_LEN:
                    # BUG FIX: format the message with % rather than passing
                    # the values as extra exception-constructor arguments.
                    raise RecoverableSampleException(
                        "PhsenControlDataParticle: for id %s size does not match %d" %
                        (control_id, OPTIONAL_CONTROL_LEN))
            elif len(record) != NORMAL_CONTROL_LEN:
                raise RecoverableSampleException(
                    "PhsenControlDataParticle: for id %s size does not match %d" %
                    (control_id, NORMAL_CONTROL_LEN))

            # calculate the checksum and compare with the received checksum
            passed_checksum = True
            try:
                chksum = int(record[-3:-1], 16)
                sum_bytes = 0
                # subtract the 3 bytes for the '*' and unique ID, 2 for the
                # checksum, and 1 for the last \r
                control_len = len(record) - 6
                for i in range(3, control_len, 2):
                    sum_bytes += int(record[i:i + 2], 16)
                calc_chksum = sum_bytes & 255
                if calc_chksum != chksum:
                    passed_checksum = False
                    log.debug('Calculated internal checksum %d does not match received %d',
                              calc_chksum, chksum)
            except Exception as e:
                log.debug('Error calculating checksums: %s, setting passed checksum to False', e)
                passed_checksum = False

            # turn the flag value from a hex-ascii value into a string of
            # binary values
            try:
                flags = bin(int(self._data_match.group(3)[8:12], 16))[2:].zfill(16)
                valid_flags = True
            except Exception:
                valid_flags = False

            result = [
                self._encode_value(PhsenControlDataParticleKey.CONTROLLER_TIMESTAMP,
                                   self.raw_data[:8],
                                   PhsenParserDataParticle.encode_int_16),
                self._encode_value(PhsenControlDataParticleKey.UNIQUE_ID,
                                   self._data_match.group(2)[0:2],
                                   PhsenParserDataParticle.encode_int_16),
                self._encode_value(PhsenControlDataParticleKey.RECORD_TYPE, control_id,
                                   PhsenParserDataParticle.encode_int_16),
                self._encode_value(PhsenControlDataParticleKey.RECORD_TIME,
                                   self._data_match.group(3)[0:8],
                                   PhsenParserDataParticle.encode_timestamp)]

            # if the flags are valid, fill in the values, otherwise set to None
            if valid_flags:
                result.extend(self._encode_value(key, flags[bit], bool)
                              for bit, key in enumerate(self.FLAG_KEYS))
            else:
                result.extend({DataParticleKey.VALUE_ID: key,
                               DataParticleKey.VALUE: None}
                              for key in self.FLAG_KEYS)

            # these 3 may also have invalid hex values, allow for none when
            # encoding so exceptions are not thrown here
            result.extend([
                self._encode_value(PhsenControlDataParticleKey.NUM_DATA_RECORDS,
                                   self._data_match.group(3)[12:18],
                                   PhsenControlDataParticle.encode_int_16_or_none),
                self._encode_value(PhsenControlDataParticleKey.NUM_ERROR_RECORDS,
                                   self._data_match.group(3)[18:24],
                                   PhsenControlDataParticle.encode_int_16_or_none),
                self._encode_value(PhsenControlDataParticleKey.NUM_BYTES_STORED,
                                   self._data_match.group(3)[24:30],
                                   PhsenControlDataParticle.encode_int_16_or_none)])

            # battery voltage is only present (and only valid hex) for
            # battery control record ids
            if control_id in BATT_CONTROL_IDS and \
                    HEX_INT_MATCHER.match(self._data_match.group(3)[30:34]):
                result.append(self._encode_value(PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                                                 self._data_match.group(3)[30:34],
                                                 PhsenParserDataParticle.encode_int_16))
            else:
                result.append({DataParticleKey.VALUE_ID: PhsenControlDataParticleKey.VOLTAGE_BATTERY,
                               DataParticleKey.VALUE: None})

            result.append(self._encode_value(PhsenControlDataParticleKey.PASSED_CHECKSUM,
                                             passed_checksum, bool))
        return result

    @staticmethod
    def encode_int_16_or_none(int_val):
        """
        Use to convert from hex-ascii to int when encoding data particle
        values; it is not an error to not match, return None without
        failing encoding.
        """
        result = None
        try:
            result = int(int_val, 16)
        except Exception:
            # the result stays at None if the conversion fails; no exception
            pass
        return result
class PhsenParser(SioMuleParser):
    """Parser for mflm_phsen records carried inside sio mule blocks."""

    def __init__(self,
                 config,
                 state,
                 stream_handle,
                 state_callback,
                 publish_callback,
                 exception_callback,
                 *args, **kwargs):
        super(PhsenParser, self).__init__(config,
                                          stream_handle,
                                          state,
                                          self.sieve_function,
                                          state_callback,
                                          publish_callback,
                                          exception_callback,
                                          *args,
                                          **kwargs)

    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        # non-data does not need to be handled here because for the single
        # file the data may be corrected and re-written later, it is just
        # ignored until it matches
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        while chunk is not None:
            # NOTE(review): header_match is used without a None check below;
            # presumably the sieve function guarantees each chunk starts with
            # a sio header -- confirm against sieve_function.
            header_match = SIO_HEADER_MATCHER.match(chunk)
            sample_count = 0
            if header_match.group(1) == 'PH':
                # start after the sio header
                index = header_match.end(0)
                last_index = index
                chunk_len = len(chunk)
                while index < chunk_len:
                    data_match = DATA_MATCHER.match(chunk[index:])
                    control_match = CONTROL_MATCHER.match(chunk[index:])
                    # check for any valid match and make sure no extra data
                    # was found between valid matches
                    if data_match or control_match or chunk[index] == SIO_END:
                        # if the indices don't match we have data that doesn't
                        # match; exclude the expected possible ph end bytes
                        if last_index != index and chunk[last_index:index] != PH_EXTRA_END:
                            # we found bad data, send a sample exception but
                            # keep processing the file
                            log.warning("unknown data found in chunk %s from %d to %d",
                                        chunk[1:32], last_index, index)
                            self._exception_callback(
                                SampleException("unknown data found in chunk %s from %d to %d" %
                                                (chunk[1:32], last_index, index)))
                            # stop processing this sio block, it is bad
                            break
                        if data_match:
                            log.debug('Found data match in chunk %s at index %d',
                                      chunk[1:32], index)
                            # particle-ize the data block received, return the
                            # record; pre-pend the sio header timestamp to the
                            # data record (in header_match.group(3))
                            sample = self._extract_sample(PhsenParserDataParticle, None,
                                                          header_match.group(3) + data_match.group(0),
                                                          None)
                            if sample:
                                # create particle
                                result_particles.append(sample)
                                sample_count += 1
                            index += len(data_match.group(0))
                            last_index = index
                        elif control_match:
                            log.debug('Found control match in chunk %s at index %d',
                                      chunk[1:32], index)
                            # particle-ize the data block received, return the
                            # record; pre-pend the sio header timestamp to the
                            # control record (in header_match.group(3))
                            sample = self._extract_sample(PhsenControlDataParticle, None,
                                                          header_match.group(3) + control_match.group(0),
                                                          None)
                            if sample:
                                # create particle
                                result_particles.append(sample)
                                sample_count += 1
                            index += len(control_match.group(0))
                            last_index = index
                        elif chunk[index] == SIO_END:
                            # found end of sio block marker, we are done with
                            # this chunk
                            break
                    else:
                        # we found extra data, warn on chunks of extra data
                        # not each byte
                        index += 1
            self._chunk_sample_count.append(sample_count)
            # non-data does not need to be handled here because for the single
            # file the data may be corrected and re-written later, it is just
            # ignored until it matches
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        return result_particles
jswope00/GAI | common/djangoapps/student/migrations/0016_auto__add_field_courseenrollment_date__chg_field_userprofile_country.py | 188 | 10960 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds ``CourseEnrollment.date`` (nullable enrollment timestamp) and
    converts ``UserProfile.country`` from a plain CharField to a
    ``django_countries`` CountryField.
    """

    def forwards(self, orm):
        # Adding field 'CourseEnrollment.date'
        db.add_column('student_courseenrollment', 'date',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True),
                      keep_default=False)

        # Changing field 'UserProfile.country'
        db.alter_column('auth_userprofile', 'country', self.gf('django_countries.fields.CountryField')(max_length=2, null=True))

    def backwards(self, orm):
        # Deleting field 'CourseEnrollment.date'
        db.delete_column('student_courseenrollment', 'date')

        # Changing field 'UserProfile.country'
        db.alter_column('auth_userprofile', 'country', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))

    # Frozen ORM state used by South at migration time.
    # Auto-generated by ./manage.py schemamigration -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
            'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
            'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
            'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'student.courseenrollment': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'student.pendingemailchange': {
            'Meta': {'object_name': 'PendingEmailChange'},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.pendingnamechange': {
            'Meta': {'object_name': 'PendingNameChange'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.registration': {
            'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
            'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'student.userprofile': {
            'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'student.usertestgroup': {
            'Meta': {'object_name': 'UserTestGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
        }
    }

    complete_apps = ['student']
| agpl-3.0 |
pombredanne/xpython | triangle/triangle_test.py | 11 | 2225 | import unittest
from triangle import Triangle, TriangleError
class TriangleTests(unittest.TestCase):
    """Exercise Triangle.kind() classification and constructor validation."""

    def test_equilateral_triangles_have_equal_sides(self):
        kind = Triangle(2, 2, 2).kind()
        self.assertEqual("equilateral", kind)

    def test_larger_equilateral_triangles_also_have_equal_sides(self):
        kind = Triangle(10, 10, 10).kind()
        self.assertEqual("equilateral", kind)

    def test_isosceles_triangles_have_last_two_sides_equal(self):
        kind = Triangle(3, 4, 4).kind()
        self.assertEqual("isosceles", kind)

    def test_isosceles_triangles_have_first_and_last_sides_equal(self):
        kind = Triangle(4, 3, 4).kind()
        self.assertEqual("isosceles", kind)

    def test_isosceles_triangles_have_two_first_sides_equal(self):
        kind = Triangle(4, 4, 3).kind()
        self.assertEqual("isosceles", kind)

    def test_isosceles_triangles_have_in_fact_exactly_two_sides_equal(self):
        kind = Triangle(10, 10, 2).kind()
        self.assertEqual("isosceles", kind)

    def test_scalene_triangles_have_no_equal_sides(self):
        kind = Triangle(3, 4, 5).kind()
        self.assertEqual("scalene", kind)

    def test_scalene_triangles_have_no_equal_sides_at_a_larger_scale_too(self):
        self.assertEqual("scalene", Triangle(10, 11, 12).kind())
        self.assertEqual("scalene", Triangle(5, 4, 2).kind())

    def test_very_small_triangles_are_legal(self):
        kind = Triangle(0.4, 0.6, 0.3).kind()
        self.assertEqual("scalene", kind)

    def test_triangles_with_no_size_are_illegal(self):
        with self.assertRaises(TriangleError):
            Triangle(0, 0, 0)

    def test_triangles_with_negative_sides_are_illegal(self):
        with self.assertRaises(TriangleError):
            Triangle(3, 4, -5)

    def test_triangles_violating_triangle_inequality_are_illegal(self):
        with self.assertRaises(TriangleError):
            Triangle(1, 1, 3)

    def test_triangles_violating_triangle_inequality_are_illegal_2(self):
        with self.assertRaises(TriangleError):
            Triangle(2, 4, 2)

    def test_triangles_violating_triangle_inequality_are_illegal_3(self):
        with self.assertRaises(TriangleError):
            Triangle(7, 3, 2)
# Allow running this test module directly (``python triangle_test.py``).
if __name__ == '__main__':
    unittest.main()
| mit |
jianghuaw/nova | nova/tests/unit/test_rpc.py | 2 | 19934 | # Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import mock
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
import testtools
from nova import context
from nova import rpc
from nova import test
# Make a class that resets all of the global variables in nova.rpc
class RPCResetFixture(fixtures.Fixture):
    """Snapshot the nova.rpc module globals and restore them on cleanup."""

    # Every module-level global that nova.rpc mutates during init/cleanup.
    _GLOBALS = ('TRANSPORT', 'NOTIFICATION_TRANSPORT', 'NOTIFIER',
                'ALLOWED_EXMODS', 'EXTRA_EXMODS', 'CONF')

    def _setUp(self):
        # Shallow-copy each global so list mutations don't leak between tests.
        self._saved = dict(
            (name, copy.copy(getattr(rpc, name))) for name in self._GLOBALS)
        self.addCleanup(self._reset_everything)

    def _reset_everything(self):
        for name in self._GLOBALS:
            setattr(rpc, name, self._saved[name])
# We can't import nova.test.TestCase because that sets up an RPCFixture
# that pretty much nullifies all of this testing
class TestRPC(testtools.TestCase):
def setUp(self):
super(TestRPC, self).setUp()
self.useFixture(RPCResetFixture())
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_unversioned(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
# The expected call to get the legacy notifier will require no new
# kwargs, and we expect the new notifier will need the noop driver
expected = [{}, {'driver': 'noop'}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'unversioned', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_both(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
expected = [{}, {'topics': ['versioned_notifications']}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'both', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_versioned(self, mock_notif, mock_noti_trans,
mock_ser, mock_exmods):
expected = [{'driver': 'noop'},
{'topics': ['versioned_notifications']}]
self._test_init(mock_notif, mock_noti_trans, mock_ser,
mock_exmods, 'versioned', expected)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_notification_transport')
@mock.patch.object(messaging, 'Notifier')
def test_init_versioned_with_custom_topics(self, mock_notif,
mock_noti_trans, mock_ser,
mock_exmods):
expected = [{'driver': 'noop'},
{'topics': ['custom_topic1', 'custom_topic2']}]
self._test_init(
mock_notif, mock_noti_trans, mock_ser, mock_exmods, 'versioned',
expected, versioned_notification_topics=['custom_topic1',
'custom_topic2'])
def test_cleanup_transport_null(self):
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFIER = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup_notification_transport_null(self):
rpc.TRANSPORT = mock.Mock()
rpc.NOTIFIER = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup_legacy_notifier_null(self):
rpc.TRANSPORT = mock.Mock()
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
rpc.NOTIFIER = mock.Mock()
def test_cleanup_notifier_null(self):
rpc.TRANSPORT = mock.Mock()
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
self.assertRaises(AssertionError, rpc.cleanup)
def test_cleanup(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
rpc.NOTIFIER = mock.Mock()
rpc.NOTIFICATION_TRANSPORT = mock.Mock()
rpc.TRANSPORT = mock.Mock()
trans_cleanup = mock.Mock()
not_trans_cleanup = mock.Mock()
rpc.TRANSPORT.cleanup = trans_cleanup
rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup
rpc.cleanup()
trans_cleanup.assert_called_once_with()
not_trans_cleanup.assert_called_once_with()
self.assertIsNone(rpc.TRANSPORT)
self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
self.assertIsNone(rpc.LEGACY_NOTIFIER)
self.assertIsNone(rpc.NOTIFIER)
@mock.patch.object(messaging, 'set_transport_defaults')
def test_set_defaults(self, mock_set):
control_exchange = mock.Mock()
rpc.set_defaults(control_exchange)
mock_set.assert_called_once_with(control_exchange)
def test_add_extra_exmods(self):
rpc.EXTRA_EXMODS = []
rpc.add_extra_exmods('foo', 'bar')
self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)
def test_clear_extra_exmods(self):
rpc.EXTRA_EXMODS = ['foo', 'bar']
rpc.clear_extra_exmods()
self.assertEqual(0, len(rpc.EXTRA_EXMODS))
def test_get_allowed_exmods(self):
rpc.ALLOWED_EXMODS = ['foo']
rpc.EXTRA_EXMODS = ['bar']
exmods = rpc.get_allowed_exmods()
self.assertEqual(['foo', 'bar'], exmods)
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url(self, mock_url):
conf = mock.Mock()
rpc.CONF = conf
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url(url_str='bar')
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(conf, 'bar')
@mock.patch.object(messaging, 'TransportURL')
def test_get_transport_url_null(self, mock_url):
conf = mock.Mock()
rpc.CONF = conf
mock_url.parse.return_value = 'foo'
url = rpc.get_transport_url()
self.assertEqual('foo', url)
mock_url.parse.assert_called_once_with(conf, None)
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client(self, mock_client, mock_ser):
rpc.TRANSPORT = mock.Mock()
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(rpc.TRANSPORT,
tgt, version_cap='1.0',
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server(self, mock_get, mock_ser):
rpc.TRANSPORT = mock.Mock()
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'RPCClient')
def test_get_client_profiler_enabled(self, mock_client, mock_ser):
rpc.TRANSPORT = mock.Mock()
tgt = mock.Mock()
ser = mock.Mock()
mock_client.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
mock_client.assert_called_once_with(rpc.TRANSPORT,
tgt, version_cap='1.0',
serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
@mock.patch.object(messaging, 'get_rpc_server')
def test_get_server_profiler_enabled(self, mock_get, mock_ser):
rpc.TRANSPORT = mock.Mock()
ser = mock.Mock()
tgt = mock.Mock()
ends = mock.Mock()
mock_ser.return_value = ser
mock_get.return_value = 'server'
server = rpc.get_server(tgt, ends, serializer='foo')
mock_ser.assert_called_once_with('foo')
access_policy = dispatcher.DefaultRPCAccessPolicy
mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
executor='eventlet', serializer=ser,
access_policy=access_policy)
self.assertEqual('server', server)
def test_get_notifier(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', publisher_id='foo')
mock_prep.assert_called_once_with(publisher_id='foo')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
def test_get_notifier_null_publisher(self):
rpc.LEGACY_NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.LEGACY_NOTIFIER.prepare = mock_prep
notifier = rpc.get_notifier('service', host='bar')
mock_prep.assert_called_once_with(publisher_id='service.bar')
self.assertIsInstance(notifier, rpc.LegacyValidatingNotifier)
self.assertEqual('notifier', notifier.notifier)
def test_get_versioned_notifier(self):
rpc.NOTIFIER = mock.Mock()
mock_prep = mock.Mock()
mock_prep.return_value = 'notifier'
rpc.NOTIFIER.prepare = mock_prep
notifier = rpc.get_versioned_notifier('service.foo')
mock_prep.assert_called_once_with(publisher_id='service.foo')
self.assertEqual('notifier', notifier)
@mock.patch.object(rpc, 'get_allowed_exmods')
@mock.patch.object(messaging, 'get_rpc_transport')
def test_create_transport(self, mock_transport, mock_exmods):
exmods = mock_exmods.return_value
transport = rpc.create_transport(mock.sentinel.url)
self.assertEqual(mock_transport.return_value, transport)
mock_exmods.assert_called_once_with()
mock_transport.assert_called_once_with(rpc.CONF,
url=mock.sentinel.url,
allowed_remote_exmods=exmods)
def _test_init(self, mock_notif, mock_noti_trans, mock_ser,
               mock_exmods, notif_format, expected_driver_topic_kwargs,
               versioned_notification_topics=None):
    """Shared helper for the rpc.init() tests.

    Configures the notification format/topics, runs rpc.init() with the
    transport factories mocked out, and verifies that the module globals
    and the two Notifier instances (legacy + versioned) are wired up as
    expected.

    :param notif_format: value for [notifications]notification_format
    :param expected_driver_topic_kwargs: list of kwargs dicts expected in
        the messaging.Notifier() constructor calls, in call order
    :param versioned_notification_topics: optional override for
        [notifications]versioned_notifications_topics; defaults to
        ['versioned_notifications']
    """
    # NOTE: a None sentinel is used instead of a mutable list default,
    # which would be shared between calls.
    if versioned_notification_topics is None:
        versioned_notification_topics = ['versioned_notifications']
    legacy_notifier = mock.Mock()
    notifier = mock.Mock()
    notif_transport = mock.Mock()
    transport = mock.Mock()
    serializer = mock.Mock()
    conf = mock.Mock()

    conf.transport_url = None
    conf.notifications.notification_format = notif_format
    conf.notifications.versioned_notifications_topics = (
        versioned_notification_topics)
    mock_exmods.return_value = ['foo']
    mock_noti_trans.return_value = notif_transport
    mock_ser.return_value = serializer
    # messaging.Notifier() is constructed twice: first for the legacy
    # notifier, then for the versioned one.
    mock_notif.side_effect = [legacy_notifier, notifier]

    @mock.patch.object(rpc, 'CONF', new=conf)
    @mock.patch.object(rpc, 'create_transport')
    @mock.patch.object(rpc, 'get_transport_url')
    def _test(get_url, create_transport):
        create_transport.return_value = transport
        rpc.init(conf)
        create_transport.assert_called_once_with(get_url.return_value)

    _test()

    self.assertTrue(mock_exmods.called)
    self.assertIsNotNone(rpc.TRANSPORT)
    self.assertIsNotNone(rpc.LEGACY_NOTIFIER)
    self.assertIsNotNone(rpc.NOTIFIER)
    self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER)
    self.assertEqual(notifier, rpc.NOTIFIER)

    expected_calls = []
    for kwargs in expected_driver_topic_kwargs:
        expected_kwargs = {'serializer': serializer}
        expected_kwargs.update(kwargs)
        expected_calls.append(((notif_transport,), expected_kwargs))

    self.assertEqual(expected_calls, mock_notif.call_args_list,
                     "The calls to messaging.Notifier() did not create "
                     "the legacy and versioned notifiers properly.")
class TestJsonPayloadSerializer(test.NoDBTestCase):
    """Tests for rpc.JsonPayloadSerializer."""

    def test_serialize_entity(self):
        # The serializer must delegate to jsonutils.to_primitive with
        # instance conversion enabled.
        with mock.patch.object(jsonutils, 'to_primitive') as mock_prim:
            rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')

        mock_prim.assert_called_once_with('entity', convert_instances=True)
class TestRequestContextSerializer(test.NoDBTestCase):
    """Tests for rpc.RequestContextSerializer.

    The serializer wraps an optional base serializer: entity
    (de)serialization is delegated to the base when one is present and is
    a pass-through otherwise; context (de)serialization goes through
    nova.context.RequestContext.
    """

    def setUp(self):
        super(TestRequestContextSerializer, self).setUp()
        self.mock_base = mock.Mock()
        self.ser = rpc.RequestContextSerializer(self.mock_base)
        # serializer with no base, to exercise the pass-through paths
        self.ser_null = rpc.RequestContextSerializer(None)

    def test_serialize_entity(self):
        self.mock_base.serialize_entity.return_value = 'foo'

        ser_ent = self.ser.serialize_entity('context', 'entity')

        self.mock_base.serialize_entity.assert_called_once_with('context',
                                                                'entity')
        self.assertEqual('foo', ser_ent)

    def test_serialize_entity_null_base(self):
        # With no base serializer the entity is returned unchanged.
        ser_ent = self.ser_null.serialize_entity('context', 'entity')

        self.assertEqual('entity', ser_ent)

    def test_deserialize_entity(self):
        self.mock_base.deserialize_entity.return_value = 'foo'

        deser_ent = self.ser.deserialize_entity('context', 'entity')

        self.mock_base.deserialize_entity.assert_called_once_with('context',
                                                                  'entity')
        self.assertEqual('foo', deser_ent)

    def test_deserialize_entity_null_base(self):
        # Pass-through deserialization without a base serializer.
        deser_ent = self.ser_null.deserialize_entity('context', 'entity')

        self.assertEqual('entity', deser_ent)

    def test_serialize_context(self):
        # Contexts are serialized via RequestContext.to_dict().
        context = mock.Mock()

        self.ser.serialize_context(context)

        context.to_dict.assert_called_once_with()

    @mock.patch.object(context, 'RequestContext')
    def test_deserialize_context(self, mock_req):
        # ...and deserialized via RequestContext.from_dict().
        self.ser.deserialize_context('context')

        mock_req.from_dict.assert_called_once_with('context')
class TestProfilerRequestContextSerializer(test.NoDBTestCase):
    """Tests for the osprofiler-aware context serializer."""

    def setUp(self):
        super(TestProfilerRequestContextSerializer, self).setUp()
        self.ser = rpc.ProfilerRequestContextSerializer(mock.Mock())

    @mock.patch('nova.rpc.profiler')
    def test_serialize_context(self, mock_profiler):
        # When a profiler is active, its trace state must be embedded in
        # the serialized context under the 'trace_info' key.
        prof = mock_profiler.get.return_value
        prof.hmac_key = 'swordfish'
        prof.get_base_id.return_value = 'baseid'
        prof.get_id.return_value = 'parentid'

        context = mock.Mock()
        context.to_dict.return_value = {'project_id': 'test'}

        self.assertEqual({'project_id': 'test',
                          'trace_info': {
                              'hmac_key': 'swordfish',
                              'base_id': 'baseid',
                              'parent_id': 'parentid'}},
                         self.ser.serialize_context(context))

    @mock.patch('nova.rpc.profiler')
    def test_deserialize_context(self, mock_profiler):
        # Deserializing must re-initialize the profiler from the embedded
        # trace_info while still producing a usable RequestContext.
        serialized = {'project_id': 'test',
                      'trace_info': {
                          'hmac_key': 'swordfish',
                          'base_id': 'baseid',
                          'parent_id': 'parentid'}}

        context = self.ser.deserialize_context(serialized)

        self.assertEqual('test', context.project_id)
        mock_profiler.init.assert_called_once_with(
            hmac_key='swordfish', base_id='baseid', parent_id='parentid')
class TestClientRouter(test.NoDBTestCase):
    """Tests for rpc.ClientRouter's cell-aware client selection."""

    @mock.patch('oslo_messaging.RPCClient')
    def test_by_instance(self, mock_rpcclient):
        # A context carrying a cell MQ connection must get a new client
        # built on that transport, mirroring the default client's target,
        # version cap and serializer.
        default_client = mock.Mock()
        cell_client = mock.Mock()
        mock_rpcclient.return_value = cell_client
        ctxt = mock.Mock()
        ctxt.mq_connection = mock.sentinel.transport

        router = rpc.ClientRouter(default_client)
        client = router.client(ctxt)

        # verify a client was created by ClientRouter
        mock_rpcclient.assert_called_once_with(
            mock.sentinel.transport, default_client.target,
            version_cap=default_client.version_cap,
            serializer=default_client.serializer)
        # verify cell client was returned
        self.assertEqual(cell_client, client)

    @mock.patch('oslo_messaging.RPCClient')
    def test_by_instance_untargeted(self, mock_rpcclient):
        # Without a cell MQ connection the default client is reused and
        # no new RPCClient is constructed.
        default_client = mock.Mock()
        cell_client = mock.Mock()
        mock_rpcclient.return_value = cell_client
        ctxt = mock.Mock()
        ctxt.mq_connection = None

        router = rpc.ClientRouter(default_client)
        client = router.client(ctxt)

        self.assertEqual(router.default_client, client)
        self.assertFalse(mock_rpcclient.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
    """Tests for the rpc.if_notifications_enabled decorator."""

    def setUp(self):
        super(TestIsNotificationsEnabledDecorator, self).setUp()
        self.f = mock.Mock()
        # the decorator copies function metadata, so the mock needs a
        # __name__ attribute
        self.f.__name__ = 'f'
        self.decorated = rpc.if_notifications_enabled(self.f)

    def test_call_func_if_needed(self):
        # With notifications enabled the wrapped function is invoked.
        self.decorated()
        self.f.assert_called_once_with()

    @mock.patch('nova.rpc.NOTIFIER.is_enabled', return_value=False)
    def test_not_call_func_if_notifier_disabled(self, mock_is_enabled):
        # A disabled notifier driver short-circuits the call entirely.
        self.decorated()
        self.assertEqual(0, len(self.f.mock_calls))

    def test_not_call_func_if_only_unversioned_notifications_requested(self):
        # Unversioned-only notification format also suppresses the call.
        self.flags(notification_format='unversioned', group='notifications')
        self.decorated()
        self.assertEqual(0, len(self.f.mock_calls))
| apache-2.0 |
rakeshmi/cinder | cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py | 19 | 10826 | # (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from oslo_utils import excutils
import paramiko
import six
from cinder import exception
from cinder.i18n import _, _LE
from cinder import utils
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
from cinder.zonemanager import fc_san_lookup_service as fc_service
LOG = logging.getLogger(__name__)
class BrcdFCSanLookupService(fc_service.FCSanLookupService):
    """The SAN lookup service that talks to Brocade switches.

    Logs in to the nameserver of every configured Brocade fabric over SSH
    and reports which of the given initiator/target port WWNs are visible
    on each fabric.

    Version History:
        1.0.0 - Initial version
    """

    VERSION = "1.0.0"

    def __init__(self, **kwargs):
        """Initializing the client."""
        super(BrcdFCSanLookupService, self).__init__(**kwargs)
        self.configuration = kwargs.get('configuration', None)
        self.create_configuration()
        self.client = self.create_ssh_client(**kwargs)

    def create_configuration(self):
        """Configuration specific to SAN context values."""
        config = self.configuration

        fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
        LOG.debug('Fabric Names: %s', fabric_names)

        # There can be more than one SAN in the network and we need to
        # get credentials for each for SAN context lookup later.
        if len(fabric_names) > 0:
            self.fabric_configs = fabric_opts.load_fabric_configurations(
                fabric_names)

    def create_ssh_client(self, **kwargs):
        """Build the paramiko SSH client used for all switch access.

        Host keys come from the system defaults unless an explicit
        known_hosts_file is supplied; by default unknown hosts are only
        warned about (WarningPolicy) unless a stricter
        missing_key_policy is passed in.
        """
        ssh_client = paramiko.SSHClient()
        known_hosts_file = kwargs.get('known_hosts_file', None)
        if known_hosts_file is None:
            ssh_client.load_system_host_keys()
        else:
            ssh_client.load_host_keys(known_hosts_file)
        missing_key_policy = kwargs.get('missing_key_policy', None)
        if missing_key_policy is None:
            missing_key_policy = paramiko.WarningPolicy()
        ssh_client.set_missing_host_key_policy(missing_key_policy)
        return ssh_client

    def get_device_mapping_from_network(self,
                                        initiator_wwn_list,
                                        target_wwn_list):
        """Provides the initiator/target map for available SAN contexts.

        Looks up nameserver of each fc SAN configured to find logged in
        devices and returns a map of initiator and target port WWNs for
        each fabric.

        :param initiator_wwn_list: List of initiator port WWN
        :param target_wwn_list: List of target port WWN
        :returns List -- device wwn map in following format
            {
                <San name>: {
                    'initiator_port_wwn_list':
                        ('200000051e55a100', '200000051e55a121'..)
                    'target_port_wwn_list':
                        ('100000051e55a100', '100000051e55a121'..)
                }
            }
        :raises Exception when connection to fabric is failed
        """
        device_map = {}
        formatted_target_list = []
        formatted_initiator_list = []
        fabric_map = {}
        fabric_names = self.configuration.fc_fabric_names
        fabrics = None
        if not fabric_names:
            raise exception.InvalidParameterValue(
                err=_("Missing Fibre Channel SAN configuration "
                      "param - fc_fabric_names"))

        fabrics = [x.strip() for x in fabric_names.split(',')]
        LOG.debug("FC Fabric List: %s", fabrics)
        if fabrics:
            # normalize all WWNs to the colon-separated lower-case form
            # produced by get_formatted_wwn, matching nameserver output
            for t in target_wwn_list:
                formatted_target_list.append(self.get_formatted_wwn(t))

            for i in initiator_wwn_list:
                formatted_initiator_list.append(self.
                                                get_formatted_wwn(i))

            for fabric_name in fabrics:
                fabric_ip = self.fabric_configs[fabric_name].safe_get(
                    'fc_fabric_address')
                fabric_user = self.fabric_configs[fabric_name].safe_get(
                    'fc_fabric_user')
                fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                    'fc_fabric_password')
                fabric_port = self.fabric_configs[fabric_name].safe_get(
                    'fc_fabric_port')

                # Get name server data from fabric and find the targets
                # logged in
                nsinfo = ''
                try:
                    LOG.debug("Getting name server data for "
                              "fabric %s", fabric_ip)
                    self.client.connect(
                        fabric_ip, fabric_port, fabric_user, fabric_pwd)
                    nsinfo = self.get_nameserver_info()
                except exception.FCSanLookupServiceException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Failed collecting name server info from"
                                      " fabric %s"), fabric_ip)
                except Exception as e:
                    msg = _("SSH connection failed "
                            "for %(fabric)s with error: %(err)s"
                            ) % {'fabric': fabric_ip, 'err': e}
                    LOG.error(msg)
                    raise exception.FCSanLookupServiceException(message=msg)
                finally:
                    # always drop the SSH session, even on success
                    self.client.close()

                LOG.debug("Lookup service:nsinfo-%s", nsinfo)
                LOG.debug("Lookup service:initiator list from "
                          "caller-%s", formatted_initiator_list)
                LOG.debug("Lookup service:target list from "
                          "caller-%s", formatted_target_list)
                # NOTE(review): on Python 3 filter() returns a lazy
                # iterator, which would break the truthiness checks and
                # the index assignments below; this code relies on
                # Python 2 list semantics -- verify before porting.
                visible_targets = filter(lambda x: x in formatted_target_list,
                                         nsinfo)
                visible_initiators = filter(lambda x: x in
                                            formatted_initiator_list, nsinfo)

                if visible_targets:
                    LOG.debug("Filtered targets is: %s", visible_targets)
                    # getting rid of the : before returning
                    for idx, elem in enumerate(visible_targets):
                        elem = str(elem).replace(':', '')
                        visible_targets[idx] = elem
                else:
                    LOG.debug("No targets are in the nameserver for SAN %s",
                              fabric_name)

                if visible_initiators:
                    # getting rid of the : before returning ~sk
                    for idx, elem in enumerate(visible_initiators):
                        elem = str(elem).replace(':', '')
                        visible_initiators[idx] = elem
                else:
                    LOG.debug("No initiators are in the nameserver "
                              "for SAN %s", fabric_name)

                fabric_map = {
                    'initiator_port_wwn_list': visible_initiators,
                    'target_port_wwn_list': visible_targets
                }

                device_map[fabric_name] = fabric_map

        LOG.debug("Device map for SAN context: %s", device_map)
        return device_map

    def get_nameserver_info(self):
        """Get name server data from fabric.

        This method will return the connected node port wwn list(local
        and remote) for the given switch fabric.  Combines the output of
        the nsshow and nscamshow CLI commands.
        """
        cli_output = None
        nsinfo_list = []
        try:
            cli_output = self._get_switch_data(zone_constant.NS_SHOW)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nsshow info for fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)
        try:
            cli_output = self._get_switch_data(zone_constant.NS_CAM_SHOW)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nscamshow"))
        if cli_output:
            nsinfo_list.extend(self._parse_ns_output(cli_output))
        LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
        return nsinfo_list

    def _get_switch_data(self, cmd):
        """Run *cmd* on the switch over the established SSH connection.

        :param cmd: CLI command, validated against SSH injection first
        :returns: list of output lines read from stdout
        :raises: FCSanLookupServiceException on SSH failure
        """
        stdin, stdout, stderr = None, None, None
        utils.check_ssh_injection([cmd])
        try:
            stdin, stdout, stderr = self.client.exec_command(cmd)
            switch_data = stdout.readlines()
        except paramiko.SSHException as e:
            msg = (_("SSH Command failed with error '%(err)s' "
                     "'%(command)s'") % {'err': six.text_type(e),
                                         'command': cmd})
            LOG.error(msg)
            raise exception.FCSanLookupServiceException(message=msg)
        finally:
            # close whichever channels were actually opened
            if (stdin):
                stdin.flush()
                stdin.close()
            if (stdout):
                stdout.close()
            if (stderr):
                stderr.close()

        return switch_data

    def _parse_ns_output(self, switch_data):
        """Parses name server data.

        Parses nameserver raw data and adds the device port wwns to the
        list.

        :returns: list of device port wwn from ns info
        """
        nsinfo_list = []
        for line in switch_data:
            # keep only rows mentioning " N " or " NL " port types
            # (presumably the logged-in device entries -- TODO confirm
            # against the Brocade nsshow output format)
            if not(" NL " in line or " N " in line):
                continue
            linesplit = line.split(';')
            if len(linesplit) > 2:
                node_port_wwn = linesplit[2]
                nsinfo_list.append(node_port_wwn)
            else:
                msg = _("Malformed nameserver string: %s") % line
                LOG.error(msg)
                raise exception.InvalidParameterValue(err=msg)
        return nsinfo_list

    def get_formatted_wwn(self, wwn_str):
        """Utility API that formats WWN to insert ':'.

        A bare 16-hex-digit WWN becomes the colon-separated lower-case
        form; anything of another length is only lower-cased.
        """
        if (len(wwn_str) != 16):
            return wwn_str.lower()
        else:
            return (':'.join([wwn_str[i:i + 2]
                              for i in range(0, len(wwn_str), 2)])).lower()
| apache-2.0 |
clinton-hall/nzbToMedia | libs/common/mutagen/asf/_objects.py | 8 | 14952 | # -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# Copyright (C) 2006-2007 Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import struct
from mutagen._util import cdata, get_size
from mutagen._compat import text_type, xrange, izip
from mutagen._tags import PaddingInfo
from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError
from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute
class BaseObject(object):
    """Common behaviour shared by every ASF object.

    Subclasses set ``GUID`` and register themselves via ``_register`` so
    that ``_get_object`` can map an on-disk GUID back to a class.
    """

    GUID = None
    # registry of GUID -> object class, shared across all subclasses
    _TYPES = {}

    def __init__(self):
        self.objects = []
        self.data = b""

    def parse(self, asf, data):
        # default parse: just keep the raw payload
        self.data = data

    def render(self, asf):
        # 16-byte GUID + 8-byte little-endian total size + payload
        return self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data

    def get_child(self, guid):
        """Return the first child object with the given GUID, or None."""
        return next((child for child in self.objects if child.GUID == guid),
                    None)

    @classmethod
    def _register(cls, other):
        """Class decorator adding *other* to the GUID registry."""
        cls._TYPES[other.GUID] = other
        return other

    @classmethod
    def _get_object(cls, guid):
        """Instantiate the registered class for *guid*, or UnknownObject."""
        registered = cls._TYPES.get(guid)
        if registered is not None:
            return registered()
        return UnknownObject(guid)

    def __repr__(self):
        return "<%s GUID=%s objects=%r>" % (
            type(self).__name__, bytes2guid(self.GUID), self.objects)

    def pprint(self):
        """Return an indented, human-readable tree of this object."""
        lines = ["%s(%s)" % (type(self).__name__, bytes2guid(self.GUID))]
        for child in self.objects:
            lines.extend(" " + row for row in child.pprint().splitlines())
        return "\n".join(lines)
class UnknownObject(BaseObject):
    """Unknown ASF object.

    Wrapper used for any GUID without a dedicated class; the payload is
    kept verbatim (via the base parse) so it can be written back.
    """

    def __init__(self, guid):
        super(UnknownObject, self).__init__()
        # unlike the known object types, the GUID is per-instance here
        assert isinstance(guid, bytes)
        self.GUID = guid
@BaseObject._register
class HeaderObject(BaseObject):
    """ASF header.

    Top-level container; all other header objects are parsed as its
    children via parse_full()/render_full().
    """

    GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C")

    @classmethod
    def parse_full(cls, asf, fileobj):
        """Parse the complete header, including all child objects.

        Raises ASFHeaderError
        """
        header = cls()

        remaining_header, num_objects = cls.parse_size(fileobj)
        # the declared size includes the 30-byte top-level preamble that
        # parse_size() already consumed
        remaining_header -= 30

        for i in xrange(num_objects):
            obj_header_size = 24
            if remaining_header < obj_header_size:
                raise ASFHeaderError("invalid header size")
            data = fileobj.read(obj_header_size)
            if len(data) != obj_header_size:
                raise ASFHeaderError("truncated")
            remaining_header -= obj_header_size

            # each child starts with its GUID and total (header+payload)
            # size
            guid, size = struct.unpack("<16sQ", data)
            obj = BaseObject._get_object(guid)

            payload_size = size - obj_header_size
            if remaining_header < payload_size:
                raise ASFHeaderError("invalid object size")
            remaining_header -= payload_size

            try:
                data = fileobj.read(payload_size)
            except OverflowError:
                # read doesn't take 64bit values
                raise ASFHeaderError("invalid header size")
            if len(data) != payload_size:
                raise ASFHeaderError("truncated")
            obj.parse(asf, data)
            header.objects.append(obj)

        return header

    @classmethod
    def parse_size(cls, fileobj):
        """Returns (size, num_objects)

        Raises ASFHeaderError
        """
        header = fileobj.read(30)
        if len(header) != 30 or header[:16] != HeaderObject.GUID:
            raise ASFHeaderError("Not an ASF file.")

        return struct.unpack("<QL", header[16:28])

    def render_full(self, asf, fileobj, available, padding_func):
        """Render the whole header, sizing padding via *padding_func*.

        *available* is the byte count currently occupied by the header in
        the file; the padding callback decides how much slack to keep.
        """
        # Render everything except padding
        num_objects = 0
        data = bytearray()
        for obj in self.objects:
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
            num_objects += 1

        # calculate how much space we need at least
        padding_obj = PaddingObject()
        header_size = len(HeaderObject.GUID) + 14
        padding_overhead = len(padding_obj.render(asf))
        needed_size = len(data) + header_size + padding_overhead

        # ask the user for padding adjustments
        file_size = get_size(fileobj)
        content_size = file_size - available
        assert content_size >= 0
        info = PaddingInfo(available - needed_size, content_size)

        # add padding
        padding = info._get_padding(padding_func)
        padding_obj.parse(asf, b"\x00" * padding)
        data += padding_obj.render(asf)
        num_objects += 1

        data = (HeaderObject.GUID +
                struct.pack("<QL", len(data) + 30, num_objects) +
                b"\x01\x02" + data)

        return data

    def parse(self, asf, data):
        # the top-level header is only handled via parse_full()
        raise NotImplementedError

    def render(self, asf):
        # ...and rendered only via render_full()
        raise NotImplementedError
@BaseObject._register
class ContentDescriptionObject(BaseObject):
    """Content description.

    Holds the five fixed UTF-16-LE text fields of an ASF file: title,
    author, copyright, description, rating.
    """

    GUID = guid2bytes("75B22633-668E-11CF-A6D9-00AA0062CE6C")

    NAMES = [
        u"Title",
        u"Author",
        u"Copyright",
        u"Description",
        u"Rating",
    ]

    def parse(self, asf, data):
        super(ContentDescriptionObject, self).parse(asf, data)
        # five 16-bit lengths, then the five UTF-16-LE strings back to
        # back
        lengths = struct.unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            if length > 0:
                texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00"))
            else:
                # zero length: the field is absent
                texts.append(None)
            pos = end

        for key, value in izip(self.NAMES, texts):
            if value is not None:
                value = ASFUnicodeAttribute(value=value)
                asf._tags.setdefault(self.GUID, []).append((key, value))

    def render(self, asf):
        def render_text(name):
            # absent fields render empty; present ones get a UTF-16 NUL
            # terminator appended
            value = asf.to_content_description.get(name)
            if value is not None:
                return text_type(value).encode("utf-16-le") + b"\x00\x00"
            else:
                return b""

        texts = [render_text(x) for x in self.NAMES]
        data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts)
        return self.GUID + struct.pack("<Q", 24 + len(data)) + data
@BaseObject._register
class ExtendedContentDescriptionObject(BaseObject):
    """Extended content description.

    Arbitrary named attributes: a 16-bit record count followed by
    (name, type, value) records.
    """

    GUID = guid2bytes("D2D0A440-E307-11D2-97F0-00A0C95EA850")

    def parse(self, asf, data):
        super(ExtendedContentDescriptionObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            # UTF-16-LE attribute name, length-prefixed
            name_length, = struct.unpack("<H", data[pos:pos + 2])
            pos += 2
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value_type, value_length = struct.unpack("<HH", data[pos:pos + 4])
            pos += 4
            value = data[pos:pos + value_length]
            pos += value_length
            # the attribute class selected by value_type decodes the raw
            # value bytes
            attr = ASFBaseAttribute._get_type(value_type)(data=value)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_extended_content_description.items()
        data = b"".join(attr.render(name) for (name, attr) in attrs)
        data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
        return self.GUID + data
@BaseObject._register
class FilePropertiesObject(BaseObject):
    """File properties: overall play length of the file."""

    GUID = guid2bytes("8CABDCA1-A947-11CF-8EE4-00C00C205365")

    def parse(self, asf, data):
        super(FilePropertiesObject, self).parse(asf, data)
        play_duration, _, preroll = struct.unpack("<QQQ", data[40:64])
        # duration is stored in 100ns units, preroll in milliseconds;
        # some files carry a preroll larger than the duration, so the
        # resulting length is clamped to >= 0
        seconds = (play_duration / 10000000.0) - (preroll / 1000.0)
        asf.info.length = max(seconds, 0.0)
@BaseObject._register
class StreamPropertiesObject(BaseObject):
    """Stream properties: audio channels, sample rate and bitrate."""

    GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365")

    def parse(self, asf, data):
        super(StreamPropertiesObject, self).parse(asf, data)
        # fixed offsets into the payload for the audio format fields
        n_channels, rate, byte_rate = struct.unpack("<HII", data[56:66])
        asf.info.channels = n_channels
        asf.info.sample_rate = rate
        # stored as bytes per second; expose bits per second
        asf.info.bitrate = byte_rate * 8
@BaseObject._register
class CodecListObject(BaseObject):
    """Codec List.

    Used to extract the codec type/name/description of the first audio
    stream.
    """

    GUID = guid2bytes("86D15240-311D-11D0-A3A4-00A0C90348F6")

    def _parse_entry(self, data, offset):
        """Parse one codec entry, returning
        (next_offset, type, name, description, codec).

        can raise cdata.error
        """
        type_, offset = cdata.uint16_le_from(data, offset)

        units, offset = cdata.uint16_le_from(data, offset)
        # utf-16 code units, not characters..
        next_offset = offset + units * 2
        try:
            name = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            # tolerate broken names rather than failing the whole parse
            name = u""
        offset = next_offset

        units, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + units * 2
        try:
            desc = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            desc = u""
        offset = next_offset

        bytes_, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + bytes_
        codec = u""
        if bytes_ == 2:
            # a 2-byte info blob is looked up as a codec id in the CODECS
            # table
            codec_id = cdata.uint16_le_from(data, offset)[0]
            if codec_id in CODECS:
                codec = CODECS[codec_id]
        offset = next_offset

        return offset, type_, name, desc, codec

    def parse(self, asf, data):
        super(CodecListObject, self).parse(asf, data)

        # entries start after a 16-byte prefix, with a 32-bit entry count
        offset = 16
        count, offset = cdata.uint32_le_from(data, offset)
        for i in xrange(count):
            try:
                offset, type_, name, desc, codec = \
                    self._parse_entry(data, offset)
            except cdata.error:
                raise ASFError("invalid codec entry")

            # go with the first audio entry
            if type_ == 2:
                name = name.strip()
                desc = desc.strip()
                asf.info.codec_type = codec
                asf.info.codec_name = name
                asf.info.codec_description = desc
                return
@BaseObject._register
class PaddingObject(BaseObject):
    """Padding object.

    Pure filler; only the inherited raw-data handling is needed.
    """

    GUID = guid2bytes("1806D474-CADF-4509-A4BA-9AABCB96AAE8")
@BaseObject._register
class StreamBitratePropertiesObject(BaseObject):
    """Stream bitrate properties.

    Not interpreted; the inherited parse only stores the payload.
    """

    GUID = guid2bytes("7BF875CE-468D-11D1-8D82-006097C9A2B2")
@BaseObject._register
class ContentEncryptionObject(BaseObject):
    """Content encryption.

    Not interpreted; the inherited parse only stores the payload.
    """

    GUID = guid2bytes("2211B3FB-BD23-11D2-B4B7-00A0C955FC6E")
@BaseObject._register
class ExtendedContentEncryptionObject(BaseObject):
    """Extended content encryption.

    Not interpreted; the inherited parse only stores the payload.
    """

    GUID = guid2bytes("298AE614-2622-4C17-B935-DAE07EE9289C")
@BaseObject._register
class HeaderExtensionObject(BaseObject):
    """Header extension.

    Container for the extension objects (metadata, metadata library,
    ...) that follow a 22-byte preamble.
    """

    GUID = guid2bytes("5FBF03B5-A92E-11CF-8EE3-00C00C205365")

    def parse(self, asf, data):
        super(HeaderExtensionObject, self).parse(asf, data)
        # bytes 18-21 hold the size of the nested object data; the bytes
        # before are reserved fields
        datasize, = struct.unpack("<I", data[18:22])
        datapos = 0
        while datapos < datasize:
            guid, size = struct.unpack(
                "<16sQ", data[22 + datapos:22 + datapos + 24])
            obj = BaseObject._get_object(guid)
            obj.parse(asf, data[22 + datapos + 24:22 + datapos + size])
            self.objects.append(obj)
            datapos += size

    def render(self, asf):
        data = bytearray()
        for obj in self.objects:
            # some files have the padding in the extension header, but we
            # want to add it at the end of the top level header. Just
            # skip padding at this level.
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
        # the literal byte strings below are fixed reserved fields
        # required by the format, followed by the nested data size
        return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
                b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
                b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
                b"\x06\x00" + struct.pack("<I", len(data)) + data)
@BaseObject._register
class MetadataObject(BaseObject):
    """Metadata description.

    Stream-scoped attributes stored in the header extension.
    """

    GUID = guid2bytes("C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA")

    def parse(self, asf, data):
        super(MetadataObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (reserved, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'stream': stream}
            if value_type == 2:
                # type 2 values are decoded with dword=False here --
                # presumably stored word-sized in this object; see the
                # attribute classes in _attrs
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata.items()
        data = b"".join([attr.render_m(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
@BaseObject._register
class MetadataLibraryObject(BaseObject):
    """Metadata library description.

    Like MetadataObject but each record also carries a language index.
    """

    GUID = guid2bytes("44231C94-9498-49D1-A141-1D134E457054")

    def parse(self, asf, data):
        super(MetadataLibraryObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (language, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'language': language, 'stream': stream}
            if value_type == 2:
                # same word-sized bool handling as MetadataObject
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata_library
        data = b"".join([attr.render_ml(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
| gpl-3.0 |
Kore-Core/kore | qa/rpc-tests/p2p-acceptblock.py | 1 | 12362 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Kore Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import KoreTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
    """Bare-bones p2p peer used to feed blocks and messages to a node."""

    def __init__(self):
        NodeConnCB.__init__(self)
        self.connection = None
        self.ping_counter = 1
        self.last_pong = msg_pong()

    def add_connection(self, conn):
        # the NodeConn is created externally and handed to us
        self.connection = conn

    # Track the last getdata message we receive (used in the test)
    def on_getdata(self, conn, message):
        self.last_getdata = message

    # Spin until verack message is received from the node.
    # We use this to signal that our test can begin. This
    # is called from the testing thread, so it needs to acquire
    # the global lock.
    def wait_for_verack(self):
        while True:
            with mininode_lock:
                if self.verack_received:
                    return
            time.sleep(0.05)

    # Wrapper for the NodeConn's send_message function
    def send_message(self, message):
        self.connection.send_message(message)

    def on_pong(self, conn, message):
        self.last_pong = message

    # Sync up with the node after delivery of a block
    def sync_with_ping(self, timeout=30):
        # send a ping with a fresh nonce and poll until the matching pong
        # arrives or the timeout elapses; returns True on success
        self.connection.send_message(msg_ping(nonce=self.ping_counter))
        received_pong = False
        sleep_time = 0.05
        while not received_pong and timeout > 0:
            time.sleep(sleep_time)
            timeout -= sleep_time
            with mininode_lock:
                if self.last_pong.nonce == self.ping_counter:
                    received_pong = True
        self.ping_counter += 1
        return received_pong
class AcceptBlockTest(KoreTestFramework):
def add_options(self, parser):
    # Allow pointing the test at an alternate daemon binary.
    parser.add_option("--testbinary", dest="testbinary",
                      default=os.getenv("BITCOIND", "kored"),
                      help="kored binary to test")
def setup_chain(self):
    # Start from empty datadirs (no cached chain) for two nodes.
    initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
    # Node0 will be used to test behavior of processing unrequested blocks
    # from peers which are not whitelisted, while Node1 will be used for
    # the whitelisted case.  The two nodes are deliberately NOT connected
    # to each other.
    self.nodes = []
    self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
                                 binary=self.options.testbinary))
    self.nodes.append(start_node(1, self.options.tmpdir,
                                 ["-debug", "-whitelist=127.0.0.1"],
                                 binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print("Unrequested block too far-ahead not processed")
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
# Script entry point: exercise unrequested-block acceptance behavior.
if __name__ == '__main__':
    AcceptBlockTest().main()
| mit |
m11s/MissionPlanner | Lib/site-packages/numpy/lib/benchmarks/bench_arraysetops.py | 65 | 1615 | import numpy as np
import time
from numpy.lib.arraysetops import *
def bench_unique1d( plot_results = False ):
    """Benchmark np.unique against numpy.lib.arraysetops.unique1d.

    Runs both over arrays of roughly 10**2 .. 10**7 elements, prints the
    timings and their ratio, asserts both give the same result, and
    optionally plots the timings with pylab.

    NOTE: Python 2 only (print statements) and uses the deprecated
    time.clock() for timing.
    """
    exponents = np.linspace( 2, 7, 9 )
    ratios = []
    nItems = []
    dt1s = []
    dt2s = []
    for ii in exponents:
        nItem = 10 ** ii
        print 'using %d items:' % nItem
        # Random data with ~10% distinct values, so duplicates are plentiful.
        a = np.fix( nItem / 10 * np.random.random( nItem ) )
        print 'unique:'
        tt = time.clock()
        b = np.unique( a )
        dt1 = time.clock() - tt
        print dt1
        print 'unique1d:'
        tt = time.clock()
        c = unique1d( a )
        dt2 = time.clock() - tt
        print dt2
        if dt1 < 1e-8:
            # Avoid division by (near) zero for unmeasurably fast runs.
            ratio = 'ND'
        else:
            ratio = dt2 / dt1
        print 'ratio:', ratio
        print 'nUnique: %d == %d\n' % (len( b ), len( c ))
        nItems.append( nItem )
        ratios.append( ratio )
        dt1s.append( dt1 )
        dt2s.append( dt2 )
        # Both implementations must agree before the timing is meaningful.
        assert np.alltrue( b == c )
    print nItems
    print dt1s
    print dt2s
    print ratios
    if plot_results:
        import pylab
        def plotMe( fig, fun, nItems, dt1s, dt2s ):
            # Plot both timing series with the given pylab plot function.
            pylab.figure( fig )
            fun( nItems, dt1s, 'g-o', linewidth = 2, markersize = 8 )
            fun( nItems, dt2s, 'b-x', linewidth = 2, markersize = 8 )
            pylab.legend( ('unique', 'unique1d' ) )
            pylab.xlabel( 'nItem' )
            pylab.ylabel( 'time [s]' )
        plotMe( 1, pylab.loglog, nItems, dt1s, dt2s )
        plotMe( 2, pylab.plot, nItems, dt1s, dt2s )
        pylab.show()
# Script entry point: run the benchmark and show the timing plots.
if __name__ == '__main__':
    bench_unique1d( plot_results = True )
| gpl-3.0 |
zhjunlang/kbengine | kbe/src/lib/python/Tools/pybench/NewInstances.py | 92 | 1561 | from pybench import Test
# Check for new-style class support: on interpreters without it, defining a
# class deriving from ``object`` raises NameError, and this benchmark module
# refuses to import (pybench skips tests whose module fails to import).
try:
    class c(object):
        pass
except NameError:
    raise ImportError
###
class CreateNewInstances(Test):
    """pybench test: cost of instantiating new-style classes.

    Exercises a no-op class, a class whose ``__init__`` sets three
    attributes, and one with a default argument that sets six attributes.
    """

    version = 2.0
    # Per round: 3 bare instantiations + 7 with positional args + 4 with a
    # default-argument constructor.
    operations = 3 + 7 + 4
    rounds = 60000

    def test(self):
        class c(object):
            pass
        class d(object):
            def __init__(self,a,b,c):
                self.a = a
                self.b = b
                self.c = c
        class e(object):
            def __init__(self,a,b,c=4):
                self.a = a
                self.b = b
                self.c = c
                self.d = a
                self.e = b
                self.f = c
        for i in range(self.rounds):
            o = c()
            o1 = c()
            o2 = c()
            p = d(i,i,3)
            p1 = d(i,i,3)
            p2 = d(i,3,3)
            p3 = d(3,i,3)
            p4 = d(i,i,i)
            p5 = d(3,i,3)
            p6 = d(i,i,i)
            q = e(i,i,3)
            q1 = e(i,i,3)
            q2 = e(i,i,3)
            q3 = e(i,i)

    def calibrate(self):
        # Identical setup to test() but with an empty loop body: pybench
        # subtracts this timing so only the instantiation cost is measured.
        class c(object):
            pass
        class d(object):
            def __init__(self,a,b,c):
                self.a = a
                self.b = b
                self.c = c
        class e(object):
            def __init__(self,a,b,c=4):
                self.a = a
                self.b = b
                self.c = c
                self.d = a
                self.e = b
                self.f = c
        for i in range(self.rounds):
            pass
| lgpl-3.0 |
vitan/hue | desktop/core/ext-py/markdown/markdown/blockprocessors.py | 109 | 17773 | """
CORE MARKDOWN BLOCKPARSER
=============================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProssors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
import re
import markdown
class BlockProcessor:
    """ Base class for block processors.

    Each subclass will provide the methods below to work with the source and
    tree. Each processor will need to define its own ``test`` and ``run``
    methods. The ``test`` method should return True or False, to indicate
    whether the current block should be processed by this processor. If the
    test passes, the parser will call the processor's ``run`` method.
    """

    def __init__(self, parser=None):
        # The BlockParser this processor is registered with; used to recurse
        # into nested content (parseBlocks/parseChunk) and to share state.
        self.parser = parser

    def lastChild(self, parent):
        """ Return the last child of an etree element, or None if childless. """
        if len(parent):
            return parent[-1]
        else:
            return None

    def detab(self, text):
        """ Remove a tab from the front of each line of the given text.

        Returns a 2-tuple: the leading run of indented (or blank) lines with
        one tab-width of spaces stripped, and the remaining lines untouched.
        """
        newtext = []
        lines = text.split('\n')
        for line in lines:
            if line.startswith(' '*markdown.TAB_LENGTH):
                newtext.append(line[markdown.TAB_LENGTH:])
            elif not line.strip():
                # Blank lines stay (as empty strings) but do not end the run.
                newtext.append('')
            else:
                break
        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])

    def looseDetab(self, text, level=1):
        """ Remove a tab from front of lines but allowing dedented lines.

        Unlike ``detab``, lines that are not indented are left in place
        rather than terminating the scan.
        """
        lines = text.split('\n')
        for i in range(len(lines)):
            if lines[i].startswith(' '*markdown.TAB_LENGTH*level):
                lines[i] = lines[i][markdown.TAB_LENGTH*level:]
        return '\n'.join(lines)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        As the parser loops through processors, it will call the ``test``
        method on each to determine if the given block of text is of that
        type. This method must return a boolean ``True`` or ``False``. The
        actual method of testing is left to the needs of that particular
        block type. It could be as simple as ``block.startswith(some_string)``
        or a complex regular expression. As the block type may be different
        depending on the parent of the block (i.e. inside a list), the parent
        etree element is also provided and may be used as part of the test.

        Keywords:

        * ``parent``: A etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        When the parser determines the appropriate type of a block, the
        parser will call the corresponding processor's ``run`` method. This
        method should parse the individual lines of the block and append
        them to the etree.

        Note that both the ``parent`` and ``etree`` keywords are pointers
        to instances of the objects which should be edited in place. Each
        processor must make changes to the existing objects as there is no
        mechanism to return new/different objects to replace them.

        This means that this method should be adding SubElements or adding
        text to the parent, and should remove (``pop``) or add (``insert``)
        items to the list of blocks.

        Keywords:

        * ``parent``: A etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:

        * a list item
            process this part

            or this part
    """

    # One or more full tab-widths of leading spaces; group(1) is the full run.
    INDENT_RE = re.compile(r'^(([ ]{%s})+)'% markdown.TAB_LENGTH)
    # Tags that can directly hold a list item's child blocks.
    ITEM_TYPES = ['li']
    LIST_TYPES = ['ul', 'ol']

    def test(self, parent, block):
        # Match when the block is indented at least one tab, we are not
        # already detabbing (prevents re-entry), and the insertion point is
        # a list item or a list whose last child is one.
        return block.startswith(' '*markdown.TAB_LENGTH) and \
               not self.parser.state.isstate('detabbed') and \
               (parent.tag in self.ITEM_TYPES or \
                   (len(parent) and parent[-1] and \
                       (parent[-1].tag in self.LIST_TYPES)
                   )
               )

    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)
        # Flag state so test() refuses this block while we recurse into it.
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # The parent is already a li. Just parse the child block.
            self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                block = '%s\n\n%s' % (sibling[-1].text, block)
                sibling[-1].text = ''
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = markdown.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent, block):
        """ Get level of indent based on list level.

        Returns ``(level, parent)`` where ``parent`` is the etree element
        found at that nesting depth.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            indent_level = len(m.group(1))/markdown.TAB_LENGTH
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Handle indented code blocks. """

    def test(self, parent, block):
        # A code block is any block indented by at least one tab-width.
        return block.startswith(' '*markdown.TAB_LENGTH)

    def run(self, parent, blocks):
        last = self.lastChild(parent)
        chunk = blocks.pop(0)
        remainder = ''
        if last is not None and last.tag == "pre" and len(last) \
                and last[0].tag == "code":
            # Continuation of the previous code block: blank lines do not
            # terminate code blocks, so append to it, restoring the
            # linebreaks that were removed when the source was split.
            code = last[0]
            chunk, remainder = self.detab(chunk)
            code.text = markdown.AtomicString('%s\n%s\n' % (code.text, chunk.rstrip()))
        else:
            # Fresh code block: build <pre><code> and insert the text.
            pre = markdown.etree.SubElement(parent, 'pre')
            code = markdown.etree.SubElement(pre, 'code')
            chunk, remainder = self.detab(chunk)
            code.text = markdown.AtomicString('%s\n' % chunk.rstrip())
        if remainder:
            # Dedented line(s) after the first indented line belong to the
            # next block; push them back for the parser to handle first.
            blocks.insert(0, remainder)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines prefixed with ``>``). """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()] # Lines before blockquote
            # Pass lines before blockquote in recursively for parsing first.
            self.parser.parseBlocks(parent, [before])
            # Remove ``> `` from beginning of each line.
            block = '\n'.join([self.clean(line) for line in
                            block[m.start():].split('\n')])
        sibling = self.lastChild(parent)
        if sibling and sibling.tag == "blockquote":
            # Previous block was a blockquote so set that as this blocks parent
            quote = sibling
        else:
            # This is a new blockquote. Create a new parent element.
            quote = markdown.etree.SubElement(parent, 'blockquote')
        # Recursively parse block with blockquote as parent.
        self.parser.parseChunk(quote, block)

    def clean(self, line):
        """ Remove ``>`` from beginning of a line. """
        m = self.RE.match(line)
        if line.strip() == ">":
            return ""
        elif m:
            return m.group(2)
        else:
            return line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. they can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling and sibling.tag in ['ol', 'ul']:
            # Previous block was a list item, so set that as parent
            lst = sibling
            # make sure previous item is in a p (this is a loose list).
            if len(lst) and lst[-1].text and not len(lst[-1]):
                p = markdown.etree.SubElement(lst[-1], 'p')
                p.text = lst[-1].text
                lst[-1].text = ''
            # parse first block differently as it gets wrapped in a p.
            li = markdown.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        else:
            # This is a new list so create parent with appropriate tag.
            lst = markdown.etree.SubElement(parent, self.TAG)
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*markdown.TAB_LENGTH):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent
                li = markdown.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block):
        """ Break a block into list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new item. Append
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*markdown.TAB_LENGTH):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """
    # Same algorithm as OListProcessor; only the wrapping tag and the
    # top-level bullet pattern (``*``, ``+`` or ``-``) differ.
    TAG = 'ul'
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (e.g. ``## Heading``). """

    # Detect a header at start of any line in block. ``level`` captures the
    # run of 1-6 hashes (giving h1-h6); ``header`` captures the text, with
    # optional trailing hashes discarded.
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')

    def test(self, parent, block):
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse those lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            h = markdown.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen: test() already matched this exact
            # block. The original code called ``message(CRITICAL, ...)``,
            # neither of which is defined or imported in this module, so the
            # branch raised a NameError instead of logging. Use stdlib
            # logging so the diagnostic actually gets emitted.
            import logging
            logging.getLogger(__name__).critical("We've got a problem header!")
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style Headers (text underlined with ``=`` or ``-``). """

    # A setext header is the first line of a block followed immediately by a
    # line of three or more ``=`` or ``-`` characters.
    RE = re.compile(r'^.*?\n[=-]{3,}', re.MULTILINE)

    def test(self, parent, block):
        return self.RE.match(block) is not None

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # ``=`` underlines produce an h1; ``-`` underlines produce an h2.
        level = 1 if lines[1].startswith('=') else 2
        header = markdown.etree.SubElement(parent, 'h%d' % level)
        header.text = lines[0].strip()
        if len(lines) > 2:
            # Anything after the underline is re-queued for normal parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules (``***``, ``---``, ``___`` and variants). """

    RE = r'[ ]{0,3}(?P<ch>[*_-])[ ]?((?P=ch)[ ]?){2,}[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(r'(^|\n)%s(\n|$)' % RE)
    # Match a hr on a single line of text.
    MATCH_RE = re.compile(r'^%s$' % RE)

    def test(self, parent, block):
        return self.SEARCH_RE.search(block) is not None

    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # Collect everything up to (but not including) the hr line.
        prefix = []
        for candidate in lines:
            if self.MATCH_RE.match(candidate):
                break
            prefix.append(candidate)
        if prefix:
            # Parse the leading lines first so document order is preserved.
            self.parser.parseBlocks(parent, ['\n'.join(prefix)])
        # Emit the rule itself.
        markdown.etree.SubElement(parent, 'hr')
        # Whatever followed the hr goes back on the queue for later parsing.
        remainder = lines[len(prefix) + 1:]
        if remainder:
            blocks.insert(0, '\n'.join(remainder))
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that start with an empty line. """

    # Detect a block that only contains whitespace
    # or only whitespace on the first line.
    RE = re.compile(r'^\s*\n')

    def test(self, parent, block):
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.match(block)
        if m:
            # Add remaining lines to master blocks for later.
            blocks.insert(0, block[m.end():])
            sibling = self.lastChild(parent)
            if sibling and sibling.tag == 'pre' and sibling[0] and \
                    sibling[0].tag == 'code':
                # Last block is a codeblock. Append newlines so the blank
                # lines inside the code block are preserved.
                # Bug fix: the original appended the literal text "/n/n/n"
                # (forward slashes) instead of three newline characters.
                sibling[0].text = markdown.AtomicString('%s\n\n\n' % sibling[0].text)
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks (the catch-all, lowest-priority processor). """

    def test(self, parent, block):
        # Paragraphs are the fallback: accept any block.
        return True

    def run(self, parent, blocks):
        chunk = blocks.pop(0)
        if not chunk.strip():
            # Blank block: nothing to emit, just discard it.
            return
        if self.parser.state.isstate('list'):
            # Inside a tight list the text hangs directly off the parent
            # element rather than getting its own <p> wrapper.
            if parent.text:
                parent.text = '%s\n%s' % (parent.text, chunk)
            else:
                parent.text = chunk.lstrip()
        else:
            # Normal case: wrap the text in a paragraph element.
            p = markdown.etree.SubElement(parent, 'p')
            p.text = chunk.lstrip()
| apache-2.0 |
peoplepower/composer-sdk-python | com.ppc.Lesson5-SpaceTime/intelligence/lesson5/index.py | 3 | 4033 | # This is an extremely important file for creating your own bot microservices.
#
# It dynamically connects microservices (your new application-layer features and services) with the representative
# models (locations and devices) provided by com.ppc.Bot.
#
# Your microservices extend and implement the event-driven interfaces provided in the intelligence.py file,
# and can either add features to devices or services to locations.
#
# A "device microservice" adds a new feature to a single type of device. Each instance of that device will have
# its own set of device microservices that extend the behavior of the base device. To add a device microservice,
# follow the example below by simply specifying the device type, and what modules/classes it should include,
# inside the DEVICE_MICROSERVICES dictionary. Device microservices only pay attention to a single device, and
# will not trigger off of device events from other devices. The device microservice's 'parent' is a Device object which
# will extend device.py.
#
# A "location microservice" adds new services across a location (your home). A location can contain multiple devices, so
# these types of microservices can effectively coordinate the activities between multiple devices. Again, to add
# a location microservice, follow the example below by adding a new module/class to the
# LOCATION_MICROSERVICES list. Location microservices trigger off of everything. The location microservice's 'parent' is
# a Location object from location.py.
#
# Note the recommended naming conventions.
#
# You can also place your microservices into their own package for easy deployment into other bots. Include
# an index.py file for each package of microservices, and upon generating the bot, the botengine will merge
# all index.py files and therefore automatically aggregate all available microservices into your final service.
#
#
# Deleting a microservice file from an actively deployed bot can cause existing bot instances to be unable to unpickle their memory,
# which means they lose all memory and start over again as if brand new. There is a process for properly removing a
# microservice without causing a bot to lose its memory:
#
# 1. Remove references to the microservice from memory. In the case of the microservices below, you could simply
# delete the reference to the module from this file. DO NOT DELETE the original microservice file.
# Commit, Publish. The microservice will be removed from memory on the next executions of active bots.
#
# 2. After every active bot has executed at least once, you can now safely delete the entire .py microservice module.
# Again, commit and publish. The next time an active bot executes, because it has no memory of the module,
# it will be able to unpickle its memory correctly. And now your project is free of that old microservice.
#
# Since this file is loaded as a JSON structure during the generation of the bot, remember to remove all
# dangling commas at the end of your JSON objects.
# Registry consumed at bot-generation time (see the header comment above):
# all index.py files are merged, aggregating every available microservice.
MICROSERVICES = {
    # Map specific device types to a list of microservices
    "DEVICE_MICROSERVICES": {
        # This is a dictionary structure, where the keys are device types, and the value is a
        # list of all the microservices to add to that device type.
        # Use "botengine --device_types" to see the available device types for your brand / server.
        # Entry Sensors
        10014: [
            {"module": "intelligence.lesson5.device_entrytimer_microservice", "class": "DeviceEntryTimerMicroservice"}
        ],
    },
    # Map locations to their microservices
    "LOCATION_MICROSERVICES": [
        # A location is like your home. This is a list of microservices to add to your location, which listen to
        # and coordinate devices across your entire location. Location microservices trigger off of all data inputs
        # from all devices.
        {"module": "intelligence.lesson5.location_alarm_microservice", "class": "LocationAlarmMicroservice"}
    ]
}
| apache-2.0 |
HalcyonChimera/osf.io | addons/forward/models.py | 18 | 1050 | # -*- coding: utf-8 -*-
from addons.base.models import BaseNodeSettings
from dirtyfields import DirtyFieldsMixin
from django.db import models
from osf.exceptions import ValidationValueError
from osf.models.validators import validate_no_html
class NodeSettings(DirtyFieldsMixin, BaseNodeSettings):
    """Per-node settings for the "forward" addon: an external URL plus an
    optional, HTML-free display label."""
    # Addon status flags; this addon is always complete/authorized.
    complete = True
    has_auth = True
    url = models.URLField(blank=True, null=True, max_length=255)  # 242 on prod
    label = models.TextField(blank=True, null=True, validators=[validate_no_html])

    @property
    def link_text(self):
        # Prefer the human-readable label; fall back to the raw URL.
        return self.label if self.label else self.url

    def on_delete(self):
        self.reset()

    def reset(self):
        # Clear the stored link; the caller is responsible for saving.
        self.url = None
        self.label = None

    def after_register(self, node, registration, user, save=True):
        """Copy these settings onto a new registration; returns (clone, None)."""
        clone = self.clone()
        clone.owner = registration
        clone.on_add()
        clone.save()
        return clone, None

    def clean(self):
        # Reject URLs that point back at the owning node (circular forward).
        if self.url and self.owner._id in self.url:
            raise ValidationValueError('Circular URL')
| apache-2.0 |
peterlauri/django | tests/custom_methods/models.py | 343 | 1265 | """
Giving models custom methods
Any method you add to a model will be available to instances.
"""
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
    """Demo model showing custom instance methods (see module docstring)."""
    headline = models.CharField(max_length=100)
    pub_date = models.DateField()

    def __str__(self):
        return self.headline

    def was_published_today(self):
        # Compares against the current local date on the server.
        return self.pub_date == datetime.date.today()

    def articles_from_same_day_1(self):
        # ORM version: other articles sharing this article's pub_date.
        return Article.objects.filter(pub_date=self.pub_date).exclude(id=self.id)

    def articles_from_same_day_2(self):
        """
        Verbose version of get_articles_from_same_day_1, which does a custom
        database query for the sake of demonstration.
        """
        from django.db import connection
        with connection.cursor() as cursor:
            # The %s markers are driver-level parameters (safely escaped);
            # adapt_datefield_value converts the Python date into the
            # backend's expected literal form.
            cursor.execute("""
                SELECT id, headline, pub_date
                FROM custom_methods_article
                WHERE pub_date = %s
                    AND id != %s""", [connection.ops.adapt_datefield_value(self.pub_date),
                                      self.id])
            # Positional columns match the model's field order (id, headline, pub_date).
            return [self.__class__(*row) for row in cursor.fetchall()]
| bsd-3-clause |
emceethemouth/kernel_mainline | tools/perf/scripts/python/stat-cpi.py | 404 | 2391 | #!/usr/bin/env python
# Counter samples keyed by "<time>-<event>-<cpu>-<thread>" (see get_key).
data = {}
# Distinct axis values seen so far, in order of first appearance.
times = []
threads = []
cpus = []
def get_key(time, event, cpu, thread):
    """Compose the flat dict key for one (time, event, cpu, thread) sample."""
    return "-".join(["%d" % time, "%s" % event, "%d" % cpu, "%d" % thread])
def store_key(time, cpu, thread):
    """Register the sample's axes in the global lists, without duplicates."""
    for axis, value in ((times, time), (cpus, cpu), (threads, thread)):
        if value not in axis:
            axis.append(value)
def store(time, event, cpu, thread, val, ena, run):
    """Record one counter sample (value, enabled time, running time)."""
    store_key(time, cpu, thread)
    data[get_key(time, event, cpu, thread)] = [val, ena, run]
def get(time, event, cpu, thread):
    """Return the raw counter value stored for this time/event/cpu/thread."""
    return data[get_key(time, event, cpu, thread)][0]
# Event callbacks invoked by perf's stat scripting machinery. The kernel-only
# (_k), user-only (_u) and combined variants all funnel into the same two
# logical event names, "cycles" and "instructions".
def stat__cycles_k(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_k(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles_u(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions_u(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run);
def stat__cycles(cpu, thread, time, val, ena, run):
    store(time, "cycles", cpu, thread, val, ena, run);
def stat__instructions(cpu, thread, time, val, ena, run):
    store(time, "instructions", cpu, thread, val, ena, run);
def stat__interval(time):
    """Print cycles-per-instruction for every cpu/thread at this interval.

    NOTE: Python 2 syntax (print statement); time is in nanoseconds and is
    converted to seconds for display.
    """
    for cpu in cpus:
        for thread in threads:
            cyc = get(time, "cycles", cpu, thread)
            ins = get(time, "instructions", cpu, thread)
            # Guard against division by zero when no instructions retired.
            cpi = 0
            if ins != 0:
                cpi = cyc/float(ins)
            print "%15f: cpu %d, thread %d -> cpi %f (%d/%d)" % (time/(float(1000000000)), cpu, thread, cpi, cyc, ins)
def trace_end():
    # Nothing left to do: per-interval results were printed as they arrived
    # (see the commented-out alternative below for recomputing them here).
    pass
# XXX trace_end callback could be used as an alternative place
# to compute same values as in the script above:
#
# for time in times:
# for cpu in cpus:
# for thread in threads:
# cyc = get(time, "cycles", cpu, thread)
# ins = get(time, "instructions", cpu, thread)
#
# if ins != 0:
# cpi = cyc/float(ins)
#
# print "time %.9f, cpu %d, thread %d -> cpi %f" % (time/(float(1000000000)), cpu, thread, cpi)
| gpl-2.0 |
dlenski/tapiriik | tapiriik/services/Dropbox/dropbox.py | 10 | 18009 | from tapiriik.settings import WEB_ROOT, DROPBOX_APP_KEY, DROPBOX_APP_SECRET, DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.api import APIException, ServiceExceptionScope, UserException, UserExceptionType, APIExcludeActivity, ServiceException
from tapiriik.services.interchange import ActivityType, UploadedActivity
from tapiriik.services.exception_tools import strip_context
from tapiriik.services.gpx import GPXIO
from tapiriik.services.tcx import TCXIO
from tapiriik.database import cachedb, redis
from dropbox import client, rest, session
from django.core.urlresolvers import reverse
import re
import lxml
from datetime import datetime, timedelta
import logging
import bson
import pickle
logger = logging.getLogger(__name__)
class DropboxService(ServiceBase):
    """tapiriik sync service backed by Dropbox (GPX/TCX files in a user folder)."""
    ID = "dropbox"
    DisplayName = "Dropbox"
    DisplayAbbreviation = "DB"
    AuthenticationType = ServiceAuthenticationType.OAuth
    AuthenticationNoFrame = True  # damn dropbox, spoiling my slick UI
    Configurable = True
    ReceivesStationaryActivities = False

    # Maps activity types to case-insensitive regexes matched against file
    # paths; earlier items have precedence over later ones.
    # Fix: raw strings so the regex backslashes (\s) are not interpreted as
    # (invalid) string escapes - a SyntaxWarning in modern Python.
    ActivityTaggingTable = {
        ActivityType.Running: r"run",
        ActivityType.MountainBiking: r"m(oun)?t(ai)?n\s*bik(e|ing)",
        ActivityType.Cycling: r"(cycl(e|ing)|bik(e|ing))",
        ActivityType.Walking: r"walk",
        ActivityType.Hiking: r"hik(e|ing)",
        ActivityType.DownhillSkiing: r"(downhill|down(hill)?\s*ski(ing)?)",
        ActivityType.CrossCountrySkiing: r"(xc|cross.*country)\s*ski(ing)?",
        ActivityType.Snowboarding: r"snowboard(ing)?",
        ActivityType.Skating: r"skat(e|ing)?",
        ActivityType.Swimming: r"swim",
        ActivityType.Wheelchair: r"wheelchair",
        ActivityType.Rowing: r"row",
        ActivityType.Elliptical: r"elliptical",
        ActivityType.Other: r"(other|unknown)"
    }
    ConfigurationDefaults = {"SyncRoot": "/", "UploadUntagged": False, "Format": "tcx", "Filename": "%Y-%m-%d_#NAME_#TYPE"}
    SupportsHR = SupportsCadence = True
    SupportedActivities = ActivityTaggingTable.keys()
def _getClient(self, serviceRec):
if serviceRec.Authorization["Full"]:
sess = session.DropboxSession(DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
else:
sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder")
sess.set_token(serviceRec.Authorization["Key"], serviceRec.Authorization["Secret"])
return client.DropboxClient(sess)
def WebInit(self):
self.UserAuthorizationURL = reverse("oauth_redirect", kwargs={"service": "dropbox"})
pass
def RequiresConfiguration(self, svcRec):
return svcRec.Authorization["Full"] and ("SyncRoot" not in svcRec.Config or not len(svcRec.Config["SyncRoot"]))
    def GenerateUserAuthorizationURL(self, level=None):
        """Begin the OAuth 1 dance and return the Dropbox authorize URL.

        level == "full" requests full-Dropbox access; anything else requests
        app-folder access. The request token is stashed in redis for 24h so
        RetrieveAuthorizationToken can retrieve it when the user returns.
        """
        full = level == "full"
        if full:
            sess = session.DropboxSession(DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
        else:
            sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder")
        reqToken = sess.obtain_request_token()
        # Keyed by the token itself so the OAuth callback can find it again.
        redis.setex("dropbox:oauth:%s" % reqToken.key, pickle.dumps(reqToken), timedelta(hours=24))
        return sess.build_authorize_url(reqToken, oauth_callback=WEB_ROOT + reverse("oauth_return", kwargs={"service": "dropbox", "level": "full" if full else "normal"}))
def _getUserId(self, serviceRec):
info = self._getClient(serviceRec).account_info()
return info['uid']
    def RetrieveAuthorizationToken(self, req, level):
        """OAuth callback: exchange the cached request token for an access token.

        Returns (external_user_id, authorization_record) for persistence.
        """
        from tapiriik.services import Service
        tokenKey = req.GET["oauth_token"]
        redis_key = "dropbox:oauth:%s" % tokenKey
        token = redis.get(redis_key)
        # NOTE(review): `assert` is stripped under -O, and an expired/unknown
        # token surfaces as a bare AssertionError - consider raising explicitly.
        assert token
        token = pickle.loads(token)
        redis.delete(redis_key)  # the request token is single-use
        full = level == "full"
        if full:
            sess = session.DropboxSession(DROPBOX_FULL_APP_KEY, DROPBOX_FULL_APP_SECRET, "dropbox")
        else:
            sess = session.DropboxSession(DROPBOX_APP_KEY, DROPBOX_APP_SECRET, "app_folder")
        accessToken = sess.obtain_access_token(token)
        uid = int(req.GET["uid"]) # duh!
        return (uid, {"Key": accessToken.key, "Secret": accessToken.secret, "Full": full})
    def RevokeAuthorization(self, serviceRecord):
        """Dropbox's v1 API offers no token-revocation endpoint, so nothing to do."""
        pass # :(
def ConfigurationUpdating(self, svcRec, newConfig, oldConfig):
from tapiriik.sync import Sync
from tapiriik.auth import User
if newConfig["SyncRoot"] != oldConfig["SyncRoot"]:
Sync.ScheduleImmediateSync(User.AuthByService(svcRec), True)
cachedb.dropbox_cache.update({"ExternalID": svcRec.ExternalID}, {"$unset": {"Structure": None}})
    def _raiseDbException(self, e):
        """Translate a dropbox rest.ErrorResponse into a tapiriik APIException."""
        if e.status == 401:
            # Token revoked/expired - block the connection and ask the user to reauthorize.
            raise APIException("Authorization error - status " + str(e.status) + " reason " + str(e.error_msg) + " body " + str(e.body), block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
        if e.status == 507:
            # 507 Insufficient Storage - the user's Dropbox is full.
            raise APIException("Dropbox quota error", block=True, user_exception=UserException(UserExceptionType.AccountFull, intervention_required=True))
        # Anything else is an unspecified API failure (non-blocking).
        raise APIException("API failure - status " + str(e.status) + " reason " + str(e.reason) + " body " + str(e.error_msg))
    def _folderRecurse(self, structCache, dbcl, path):
        """Recursively refresh the cached folder structure rooted at `path`.

        `structCache` is a flat list of {"Path", "Hash", "Files"} records which
        is mutated in place. The Dropbox folder hash is passed back on each
        metadata call so unchanged folders return 304 and are skipped.
        """
        hash = None
        existingRecord = [x for x in structCache if x["Path"] == path]
        children = [x for x in structCache if x["Path"].startswith(path) and x["Path"] != path]
        existingRecord = existingRecord[0] if len(existingRecord) else None
        if existingRecord:
            hash = existingRecord["Hash"]
        try:
            dirmetadata = dbcl.metadata(path, hash=hash)
        except rest.ErrorResponse as e:
            if e.status == 304:
                # Folder unchanged since the cached hash.
                for child in children:
                    self._folderRecurse(structCache, dbcl, child["Path"]) # still need to recurse for children
                return # nothing new to update here
            if e.status == 404:
                # dir doesn't exist any more, delete it and all children
                structCache[:] = (x for x in structCache if x != existingRecord and x not in children)
                return
            self._raiseDbException(e)
        if not existingRecord:
            existingRecord = {"Files": [], "Path": dirmetadata["path"]}
            structCache.append(existingRecord)
        existingRecord["Hash"] = dirmetadata["hash"]
        # Rebuild the file list from scratch for this folder.
        existingRecord["Files"] = []
        curDirs = []
        for file in dirmetadata["contents"]:
            if file["is_dir"]:
                curDirs.append(file["path"])
                self._folderRecurse(structCache, dbcl, file["path"])
            else:
                # Only track the activity file formats we can parse.
                if not file["path"].lower().endswith(".gpx") and not file["path"].lower().endswith(".tcx"):
                    continue # another kind of file
                existingRecord["Files"].append({"Rev": file["rev"], "Path": file["path"]})
        # Prune cached records for subfolders that vanished from Dropbox.
        structCache[:] = (x for x in structCache if x["Path"] in curDirs or x not in children) # delete ones that don't exist
def _tagActivity(self, text):
for act, pattern in self.ActivityTaggingTable.items():
if re.search(pattern, text, re.IGNORECASE):
return act
return None
    def _getActivity(self, serviceRecord, dbcl, path):
        """Download and parse one GPX/TCX file; returns (activity, dropbox_rev)."""
        activityData = None
        try:
            f, metadata = dbcl.get_file_and_metadata(path)
        except rest.ErrorResponse as e:
            self._raiseDbException(e)
        if not activityData:
            activityData = f.read()
        try:
            # Pick the parser by extension; anything non-.tcx is treated as GPX.
            if path.lower().endswith(".tcx"):
                act = TCXIO.Parse(activityData)
            else:
                act = GPXIO.Parse(activityData)
        except ValueError as e:
            # Structurally valid XML but semantically broken activity data.
            raise APIExcludeActivity("Invalid GPX/TCX " + str(e), activity_id=path, user_exception=UserException(UserExceptionType.Corrupt))
        except lxml.etree.XMLSyntaxError as e:
            raise APIExcludeActivity("LXML parse error " + str(e), activity_id=path, user_exception=UserException(UserExceptionType.Corrupt))
        return act, metadata["rev"]
    def DownloadActivityList(self, svcRec, exhaustive=False):
        """Enumerate activities stored in the user's Dropbox.

        Uses a per-user Mongo cache of the folder structure plus per-file
        metadata (keyed by hashed relative path) so unchanged files are not
        re-downloaded. Returns (activities, exclusions).
        """
        dbcl = self._getClient(svcRec)
        if not svcRec.Authorization["Full"]:
            # App-folder access is implicitly rooted at the app folder.
            syncRoot = "/"
        else:
            syncRoot = svcRec.Config["SyncRoot"]
        cache = cachedb.dropbox_cache.find_one({"ExternalID": svcRec.ExternalID})
        if cache is None:
            cache = {"ExternalID": svcRec.ExternalID, "Structure": [], "Activities": {}}
        if "Structure" not in cache:
            cache["Structure"] = []
        # Refresh the cached folder tree in place.
        self._folderRecurse(cache["Structure"], dbcl, syncRoot)
        activities = []
        exclusions = []
        for dir in cache["Structure"]:
            for file in dir["Files"]:
                path = file["Path"]
                if svcRec.Authorization["Full"]:
                    relPath = path.replace(syncRoot, "", 1)
                else:
                    relPath = path.replace("/Apps/tapiriik/", "", 1) # dropbox api is meh api
                # Cache keys are hashed relative paths (raw paths are unsafe Mongo keys).
                hashedRelPath = self._hash_path(relPath)
                if hashedRelPath in cache["Activities"]:
                    existing = cache["Activities"][hashedRelPath]
                else:
                    existing = None
                if not existing:
                    # Continue to use the old records keyed by UID where possible
                    existing = [(k, x) for k, x in cache["Activities"].items() if "Path" in x and x["Path"] == relPath] # path is relative to syncroot to reduce churn if they relocate it
                    existing = existing[0] if existing else None
                    if existing is not None:
                        existUID, existing = existing
                        existing["UID"] = existUID
                if existing and existing["Rev"] == file["Rev"]:
                    # File unchanged since last sync:
                    # don't need entire activity loaded here, just UID
                    act = UploadedActivity()
                    act.UID = existing["UID"]
                    try:
                        act.StartTime = datetime.strptime(existing["StartTime"], "%H:%M:%S %d %m %Y %z")
                    except:
                        act.StartTime = datetime.strptime(existing["StartTime"], "%H:%M:%S %d %m %Y") # Exactly one user has managed to break %z :S
                    if "EndTime" in existing: # some cached activities may not have this, it is not essential
                        act.EndTime = datetime.strptime(existing["EndTime"], "%H:%M:%S %d %m %Y %z")
                else:
                    logger.debug("Retrieving %s (%s)" % (path, "outdated meta cache" if existing else "not in meta cache"))
                    # get the full activity
                    try:
                        act, rev = self._getActivity(svcRec, dbcl, path)
                    except APIExcludeActivity as e:
                        logger.info("Encountered APIExcludeActivity %s" % str(e))
                        exclusions.append(strip_context(e))
                        continue
                    try:
                        act.EnsureTZ()
                    except:
                        pass # We tried.
                    if hasattr(act, "OriginatedFromTapiriik") and not act.CountTotalWaypoints():
                        # This is one of the files created when TCX export was hopelessly broken for non-GPS activities.
                        # Right now, no activities in dropbox from tapiriik should be devoid of waypoints - since dropbox doesn't receive stationary activities
                        # In the future when this changes, will obviously have to modify this code to also look at modification dates or similar.
                        if ".tcx.summary-data" in path:
                            logger.info("...summary file already moved")
                        else:
                            logger.info("...moving summary-only file")
                            dbcl.file_move(path, path.replace(".tcx", ".tcx.summary-data"))
                        continue # DON'T include in listing - it'll be regenerated
                    del act.Laps
                    act.Laps = [] # Yeah, I'll process the activity twice, but at this point CPU time is more plentiful than RAM.
                    cache["Activities"][hashedRelPath] = {"Rev": rev, "UID": act.UID, "StartTime": act.StartTime.strftime("%H:%M:%S %d %m %Y %z"), "EndTime": act.EndTime.strftime("%H:%M:%S %d %m %Y %z")}
                tagRes = self._tagActivity(relPath)
                act.ServiceData = {"Path": path, "Tagged":tagRes is not None}
                act.Type = tagRes if tagRes is not None else ActivityType.Other
                logger.debug("Activity s/t %s" % act.StartTime)
                activities.append(act)
        # Persist the (possibly new) cache document.
        if "_id" in cache:
            cachedb.dropbox_cache.save(cache)
        else:
            cachedb.dropbox_cache.insert(cache)
        return activities, exclusions
    def DownloadActivity(self, serviceRecord, activity):
        """Fully populate one activity previously listed by DownloadActivityList."""
        # activity might not be populated at this point, still possible to bail out
        if not activity.ServiceData["Tagged"]:
            # Untagged files are skipped unless the user opted in via UploadUntagged.
            if not (hasattr(serviceRecord, "Config") and "UploadUntagged" in serviceRecord.Config and serviceRecord.Config["UploadUntagged"]):
                raise APIExcludeActivity("Activity untagged", permanent=False, activity_id=activity.ServiceData["Path"], user_exception=UserException(UserExceptionType.Untagged))
        # activity might already be populated, if not download it again
        path = activity.ServiceData["Path"]
        dbcl = self._getClient(serviceRecord)
        fullActivity, rev = self._getActivity(serviceRecord, dbcl, path)
        # Carry over the type/metadata determined during listing.
        fullActivity.Type = activity.Type
        fullActivity.ServiceDataCollection = activity.ServiceDataCollection
        activity = fullActivity
        # Dropbox doesn't support stationary activities yet.
        if activity.CountTotalWaypoints() <= 1:
            raise APIExcludeActivity("Too few waypoints", activity_id=path, user_exception=UserException(UserExceptionType.Corrupt))
        return activity
def _hash_path(self, path):
import hashlib
# Can't use the raw file path as a dict key in Mongo, since who knows what'll be in it (periods especially)
# Used the activity UID for the longest time, but that causes inefficiency when >1 file represents the same activity
# So, this:
csp = hashlib.new("md5")
csp.update(path.encode('utf-8'))
return csp.hexdigest()
def _clean_activity_name(self, name):
# https://www.dropbox.com/help/145/en
return re.sub("[><:\"|?*]", "", re.sub("[/\\\]", "-", name))
    def _format_file_name(self, format, activity):
        """Expand the user's filename template (#NAME/#TYPE plus strftime codes)."""
        name_pattern = re.compile("#NAME", re.IGNORECASE)
        type_pattern = re.compile("#TYPE", re.IGNORECASE)
        # strftime first, then substitute the placeholder tokens.
        name = activity.StartTime.strftime(format)
        # #NAME is dropped when the activity is unnamed or the name merely
        # repeats the activity type.
        name = name_pattern.sub(self._clean_activity_name(activity.Name) if activity.Name and len(activity.Name) > 0 and activity.Name.lower() != activity.Type.lower() else "", name)
        name = type_pattern.sub(activity.Type, name)
        name = re.sub(r"([\W_])\1+", r"\1", name) # To handle cases where the activity is unnamed
        name = re.sub(r"^([\W_])|([\W_])$", "", name) # To deal with trailing-seperator weirdness (repeated seperator handled by prev regexp)
        return name
    def UploadActivity(self, serviceRecord, activity):
        """Render the activity to the configured format and write it to Dropbox.

        Returns the full Dropbox path of the uploaded file.
        """
        format = serviceRecord.GetConfiguration()["Format"]
        # Prefer a prerendered document when the sync core already produced one.
        if format == "tcx":
            if "tcx" in activity.PrerenderedFormats:
                logger.debug("Using prerendered TCX")
                data = activity.PrerenderedFormats["tcx"]
            else:
                data = TCXIO.Dump(activity)
        else:
            if "gpx" in activity.PrerenderedFormats:
                logger.debug("Using prerendered GPX")
                data = activity.PrerenderedFormats["gpx"]
            else:
                data = GPXIO.Dump(activity)
        dbcl = self._getClient(serviceRecord)
        fname = self._format_file_name(serviceRecord.GetConfiguration()["Filename"], activity)[:250] + "." + format # DB has a max path component length of 255 chars, and we have to save for the file ext (4) and the leading slash (1)
        if not serviceRecord.Authorization["Full"]:
            fpath = "/" + fname
        else:
            fpath = serviceRecord.Config["SyncRoot"] + "/" + fname
        try:
            metadata = dbcl.put_file(fpath, data.encode("UTF-8"))
        except rest.ErrorResponse as e:
            self._raiseDbException(e)
        # fake this in so we don't immediately redownload the activity next time 'round
        cache = cachedb.dropbox_cache.find_one({"ExternalID": serviceRecord.ExternalID})
        cache["Activities"][self._hash_path("/" + fname)] = {"Rev": metadata["rev"], "UID": activity.UID, "StartTime": activity.StartTime.strftime("%H:%M:%S %d %m %Y %z"), "EndTime": activity.EndTime.strftime("%H:%M:%S %d %m %Y %z")}
        cachedb.dropbox_cache.update({"ExternalID": serviceRecord.ExternalID}, cache) # not upsert, hope the record exists at this time...
        return fpath
def DeleteCachedData(self, serviceRecord):
cachedb.dropbox_cache.remove({"ExternalID": serviceRecord.ExternalID})
| apache-2.0 |
slimta/python-slimta-cloudstorage | test/test_slimta_cloudstorage_aws.py | 2 | 5079 |
import json
from mox3.mox import MoxTestBase, IsA
from six.moves import cPickle
import gevent
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.sqs.queue import Queue
from boto.sqs.message import Message
from slimta.envelope import Envelope
from slimta.cloudstorage.aws import SimpleStorageService, SimpleQueueService
class TestSimpleStorageService(MoxTestBase):
    """mox record/replay unit tests for the S3-backed envelope storage wrapper.

    Each test records the expected boto calls, replays, then exercises the
    SimpleStorageService method under test.
    """

    def setUp(self):
        super(TestSimpleStorageService, self).setUp()
        self.bucket = self.mox.CreateMock(Bucket)
        self.key = self.mox.CreateMock(Key)
        self.s3 = SimpleStorageService(self.bucket, prefix='test-')
        # Replace the Key factory so the tests control the created key object.
        self.s3.Key = self.mox.CreateMockAnything()
        self.env = Envelope('sender@example.com', ['rcpt@example.com'])
        self.pickled_env = cPickle.dumps(self.env, cPickle.HIGHEST_PROTOCOL)

    def test_write_message(self):
        # The pickled envelope is stored with timestamp/attempt metadata and
        # a key name generated under the configured prefix.
        self.s3.Key.__call__(self.bucket).AndReturn(self.key)
        self.key.set_metadata('timestamp', '1234.0')
        self.key.set_metadata('attempts', '')
        self.key.set_metadata('delivered_indexes', '')
        self.key.set_contents_from_string(self.pickled_env)
        self.mox.ReplayAll()
        self.s3.write_message(self.env, 1234.0)
        self.assertTrue(isinstance(self.key.key, str))
        self.assertTrue(self.key.key.startswith('test-'))

    def test_set_message_meta(self):
        # Only the timestamp/attempts metadata is rewritten on update.
        self.bucket.get_key('storeid').AndReturn(self.key)
        self.key.set_metadata('timestamp', '5678.0')
        self.key.set_metadata('attempts', '3')
        self.mox.ReplayAll()
        self.s3.set_message_meta('storeid', 5678.0, 3)

    def test_delete_message(self):
        self.bucket.get_key('storeid').AndReturn(self.key)
        self.key.delete()
        self.mox.ReplayAll()
        self.s3.delete_message('storeid')

    def test_get_message(self):
        # The envelope round-trips through pickle; empty delivered_indexes
        # must not appear in the returned metadata.
        self.bucket.get_key('storeid').AndReturn(self.key)
        self.key.get_contents_as_string().AndReturn(self.pickled_env)
        self.key.get_metadata('timestamp').AndReturn('4321.0')
        self.key.get_metadata('attempts').AndReturn('5')
        self.key.get_metadata('delivered_indexes').AndReturn('')
        self.mox.ReplayAll()
        env, meta = self.s3.get_message('storeid')
        self.assertEqual('sender@example.com', env.sender)
        self.assertEqual(['rcpt@example.com'], env.recipients)
        self.assertEqual(4321.0, meta['timestamp'])
        self.assertEqual(5, meta['attempts'])
        self.assertFalse('delivered_indexes' in meta)

    def test_get_message_meta(self):
        # delivered_indexes is stored as a JSON list in key metadata.
        self.bucket.get_key('storeid').AndReturn(self.key)
        self.key.get_metadata('timestamp').AndReturn('4321.0')
        self.key.get_metadata('attempts').AndReturn('5')
        self.key.get_metadata('delivered_indexes').AndReturn('[1, 2]')
        self.mox.ReplayAll()
        meta = self.s3.get_message_meta('storeid')
        self.assertEqual(4321.0, meta['timestamp'])
        self.assertEqual(5, meta['attempts'])
        self.assertEqual([1, 2], meta['delivered_indexes'])

    def test_list_messages(self):
        # Listing maps each prefixed key to (timestamp, storage_id).
        self.mox.StubOutWithMock(self.s3, 'get_message_meta')
        self.bucket.list('test-').AndReturn(['test-storeid1', 'test-storeid2'])
        self.s3.get_message_meta('test-storeid1').AndReturn((1234.0, 1))
        self.s3.get_message_meta('test-storeid2').AndReturn((5678.0, 2))
        self.mox.ReplayAll()
        ret = list(self.s3.list_messages())
        self.assertEqual([(1234.0, 'test-storeid1'), (5678.0, 'test-storeid2')], ret)
class TestSimpleQueueService(MoxTestBase):
    """mox record/replay unit tests for the SQS-backed relay queue wrapper."""

    def setUp(self):
        super(TestSimpleQueueService, self).setUp()
        self.queue = self.mox.CreateMock(Queue)
        self.sqs = SimpleQueueService(self.queue)

    def test_queue_message(self):
        # queue_message must retry the SQS write until it reports success:
        # first write returns False (failure), second returns True.
        self.sqs.Message = self.mox.CreateMockAnything()
        msg = self.mox.CreateMock(Message)
        self.sqs.Message.__call__().AndReturn(msg)
        msg.set_body(json.dumps({'timestamp': 1234.0, 'storage_id': 'storeid'}))
        self.queue.write(msg).AndReturn(False)
        self.queue.write(msg).AndReturn(True)
        self.mox.ReplayAll()
        self.sqs.queue_message('storeid', 1234.0)

    def test_poll(self):
        # poll yields a (timestamp, storage_id, raw_message) triple per
        # message, parsed from the JSON body.
        msg1 = self.mox.CreateMock(Message)
        msg2 = self.mox.CreateMock(Message)
        self.queue.get_messages().AndReturn([msg1, msg2])
        msg1.get_body().AndReturn('{"timestamp": 1234.0, "storage_id": "storeid1"}')
        msg2.get_body().AndReturn('{"timestamp": 5678.0, "storage_id": "storeid2"}')
        self.mox.ReplayAll()
        ret = list(self.sqs.poll())
        self.assertEqual([(1234.0, 'storeid1', msg1), (5678.0, 'storeid2', msg2)], ret)

    def test_sleep(self):
        # sleep honors the configured poll_pause via gevent.sleep.
        self.mox.StubOutWithMock(gevent, 'sleep')
        gevent.sleep(13.0)
        self.mox.ReplayAll()
        sqs = SimpleQueueService(None, poll_pause=13.0)
        sqs.sleep()

    def test_delete(self):
        # delete removes the raw message from the underlying SQS queue.
        msg = self.mox.CreateMock(Message)
        self.queue.delete_message(msg)
        self.mox.ReplayAll()
        self.sqs.delete(msg)
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| mit |
aequitas/home-assistant | homeassistant/components/twitch/sensor.py | 7 | 2926 | """Support for the Twitch stream status."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed while a channel is live.
ATTR_GAME = 'game'
ATTR_TITLE = 'title'

# Configuration keys accepted by the platform.
CONF_CHANNELS = 'channels'
CONF_CLIENT_ID = 'client_id'

ICON = 'mdi:twitch'

# Sensor states: a channel is either streaming or offline.
STATE_OFFLINE = 'offline'
STATE_STREAMING = 'streaming'

# A Twitch API client ID is mandatory; the channel list defaults to empty.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CLIENT_ID): cv.string,
    vol.Required(CONF_CHANNELS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Twitch platform."""
    from twitch import TwitchClient
    from requests.exceptions import HTTPError
    channels = config.get(CONF_CHANNELS, [])
    client = TwitchClient(client_id=config.get(CONF_CLIENT_ID))
    # Cheap authenticated call used purely to validate the client ID up front.
    try:
        client.ingests.get_server_list()
    except HTTPError:
        _LOGGER.error("Client ID is not valid")
        return
    # The Twitch API is keyed by numeric user IDs, not channel names.
    users = client.users.translate_usernames_to_ids(channels)
    add_entities([TwitchSensor(user, client) for user in users], True)
class TwitchSensor(Entity):
    """Representation of an Twitch channel."""

    def __init__(self, user, client):
        """Initialize the sensor."""
        self._client = client
        self._user = user
        # Channel name doubles as the entity name; the numeric ID is what
        # the Twitch API calls are keyed on.
        self._channel = self._user.name
        self._id = self._user.id
        self._state = STATE_OFFLINE
        self._preview = self._game = self._title = None

    @property
    def should_poll(self):
        """Device should be polled."""
        return True

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._channel

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def entity_picture(self):
        """Return preview of current game."""
        return self._preview

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        # Game/title only exist while streaming; offline returns None (no attrs).
        if self._state == STATE_STREAMING:
            return {
                ATTR_GAME: self._game,
                ATTR_TITLE: self._title,
            }

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    # pylint: disable=no-member
    def update(self):
        """Update device state."""
        stream = self._client.streams.get_stream_by_user(self._id)
        if stream:
            self._game = stream.get('channel').get('game')
            self._title = stream.get('channel').get('status')
            self._preview = stream.get('preview').get('medium')
            self._state = STATE_STREAMING
        else:
            # Offline channels show the channel logo instead of a stream preview.
            self._preview = self._client.users.get_by_id(self._id).get('logo')
            self._state = STATE_OFFLINE
| apache-2.0 |
p4datasystems/CarnotKEdist | dist/Lib/distutils/tests/test_bdist.py | 127 | 1547 | """Tests for distutils.command.bdist."""
import os
import unittest
from test.test_support import run_unittest
from distutils.command.bdist import bdist
from distutils.tests import support
class BuildTestCase(support.TempdirManager,
                    unittest.TestCase):
    """Unit tests for the distutils `bdist` command."""

    def test_formats(self):
        """--formats is honoured and the known format table is intact."""
        # let's create a command and make sure
        # we can set the format
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.formats = ['msi']
        cmd.ensure_finalized()
        self.assertEqual(cmd.formats, ['msi'])
        # what formats does bdist offer?
        formats = ['bztar', 'gztar', 'msi', 'rpm', 'tar',
                   'wininst', 'zip', 'ztar']
        found = sorted(cmd.format_command)
        self.assertEqual(found, formats)

    def test_skip_build(self):
        """--skip-build must propagate from bdist to its sub-commands."""
        # bug #10946: bdist --skip-build should trickle down to subcommands
        dist = self.create_dist()[1]
        cmd = bdist(dist)
        cmd.skip_build = 1
        cmd.ensure_finalized()
        dist.command_obj['bdist'] = cmd
        names = ['bdist_dumb', 'bdist_wininst']
        # bdist_rpm does not support --skip-build
        if os.name == 'nt':
            names.append('bdist_msi')
        for name in names:
            subcmd = cmd.get_finalized_command(name)
            self.assertTrue(subcmd.skip_build,
                            '%s should take --skip-build from bdist' % name)
def test_suite():
    """Return the suite of bdist tests for the distutils test runner."""
    # Fix: unittest.makeSuite() is deprecated and was removed in Python 3.13;
    # TestLoader.loadTestsFromTestCase() is the long-standing equivalent and
    # exists on every Python version this file has ever targeted.
    return unittest.TestLoader().loadTestsFromTestCase(BuildTestCase)

if __name__ == '__main__':
    run_unittest(test_suite())
| apache-2.0 |
morendo/herradar | herradar.py | 1 | 15371 | from mechanize import Browser
from bs4 import BeautifulSoup
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from time import sleep
from datetime import date
from datetime import datetime
from collections import deque
import lxml
import cPickle as pickle
import random
import smtplib
import sys
import traceback
# TODO : improve checking for profile changes, make more efficient?
# TODO : change cycle interval to reflect actual time difference starting from end of data extraction from okc

#################################################################################
#                            ACCOUNT INFORMATION                                #
#################################################################################

# This is the email account you wish to receive these alerts on
receivingEmailAddress = ""

# You need to supply a gmail account in order to send the alerts
# create one and fill in the username and password (ignore the gmailAddress variable)
gmailUsername = ""
gmailPassword = ""
gmailAddress = gmailUsername + "@gmail.com"

# TODO 2: create new okc login since this will be visiting profiles, disable visiting on new login
# Supply an OkCupid account. If you aren't a premium user and have invisible mode enabled you may want to make a throwaway
okCupidUsername = ""
okCupidPassword = ""

#################################################################################
#                             SEARCH PARAMETERS                                 #
#################################################################################

# TODO 2: get searchURL
# Do a search on OKC, using all of the preferences, filters, and options you want this script to use
# then paste the URL in this value
searchURL = ""

# Terms to search tracked profiles for, case insensitive
searchTerms = ['rockin', 'hammer', 'love', 'gifts', 'slime', 'gaming', 'boxing']

#################################################################################
#                             OTHER BOT SETTINGS                                #
#################################################################################

# Specify where entity information is saved on your machine
# You can specify a different path or just use the file name to store in the same directory
# If the script has issues reloading collections after starting, change these to the absolute path where you run the script
taggedSaveLocation = "./taggedEntities.pk"
trackingSaveLocation = "./trackingEntities.pk"

# Length of time between cycles in seconds
sleepInterval = 3*60
# Percentage of random variation between cycles (to make this look less like a bot)
sleepVariation = .1

# length of time for tracking in days, after the Nth day, tracking stops
trackingAge = 7

#################################################################################
#                        DO NOT MODIFY ANYTHING BELOW THIS                      #
#################################################################################

# Module-level state, persisted between runs via pickle:
# tagged   - every username ever seen
# tracking - username -> [first_seen_date, last_profile_text, age, image_url, profile_url]
tagged = []
tracking = {}
# Set up Error reporting
def my_excepthook(type, value, tb):
    """Global crash handler: email the traceback, then defer to the default hook."""
    #sendEmail('herRadar has crashed', 'incoming crash report')
    traceBack = traceback.format_exception(type, value, tb)
    errorMsg = ''
    # `entity` is a loop variable of the main cycle; it may not exist yet
    # if the crash happens before the first iteration.
    try :
        errorEntity = entity
    except NameError :
        errorEntity = "NA"
    for line in traceBack :
        errorMsg = errorMsg + line + '<br>'
    msg = """herRadar has crashed <br> here is a traceback of the exception <p>""" + errorMsg + """<p>The current entity was: <br>""" + errorEntity
    sendEmail('herRadar has crashed', msg)
    # Fall through to the default hook so the crash still reaches stderr.
    sys.__excepthook__(type, value, tb)
sys.excepthook = my_excepthook
def loadCollections() :
    """Restore `tagged` and `tracking` from disk, starting fresh if absent."""
    global tagged
    global tracking
    try :
        with open(taggedSaveLocation, 'rb') as input :
            tagged = pickle.load(input)
    except IOError :
        # First run (or bad path): start with an empty tagged list.
        print taggedSaveLocation
        print "Previously tagged entities not found, creating new collection"
        tagged = []
    try :
        with open(trackingSaveLocation, 'rb') as input :
            tracking = pickle.load(input)
    except IOError :
        print "Currently tracked entities not found, creating new collection"
        tracking = {}
def saveCollections() :
    """Persist `tagged` and `tracking` to disk with pickle (best effort)."""
    try :
        with open(taggedSaveLocation, 'wb') as output :
            pickle.dump(tagged, output, pickle.HIGHEST_PROTOCOL)
    except IOError :
        # Deliberately non-fatal: a failed save only loses this cycle's state.
        print "Error while saving"
    try :
        with open(trackingSaveLocation, 'wb') as output :
            pickle.dump(tracking, output, pickle.HIGHEST_PROTOCOL)
    except IOError :
        print "Error while saving"
def sendEmail(subject, msg) :
    """Send an HTML email alert through the configured gmail account."""
    mime = MIMEMultipart('alternative')
    mime['Subject'] = subject
    # NOTE(review): 'From' is set to the bare username while sendmail() uses
    # the full gmailAddress - confirm which the receiving server expects.
    mime['From'] = gmailUsername
    mime['To'] = receivingEmailAddress
    mime.attach(MIMEText(msg, 'html'))
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(gmailUsername, gmailPassword)
    server.sendmail(gmailAddress, receivingEmailAddress, mime.as_string())
    server.quit()
    #print 'Sending Email: \n' + subject + '\n' + msg
    print 'Sending email'
class acNode :
    """A single node of the Aho-Corasick trie used by searchTree."""
    def __init__(self, ch) :
        self.char = ch            # edge character leading into this node (None for root)
        self.fail = None          # failure link, wired up by searchTree.make()
        self.results = []         # keywords terminating at this node
        self.transitions = []     # child nodes
class searchTree :
    """Aho-Corasick multi-pattern string matcher.

    Usage: add() each term, call make() once, then search() any number of
    texts. search() returns one entry per occurrence of each term.

    Fix over the original implementation: matches are also collected along
    the failure links, so terms that are suffixes of other terms (e.g. "b"
    inside a match of "ab") are reported instead of being silently dropped.
    """

    class _Node :
        """Trie node: outgoing transitions, terminal results, failure link."""
        def __init__(self, ch) :
            self.char = ch
            self.transitions = []
            self.results = []
            self.fail = None

    def __init__(self) :
        self.terms = []
        self.root = None

    def add(self, term) :
        """Register one search term (call before make())."""
        self.terms.append(term)

    def make(self) :
        """Build the trie and wire up the failure links."""
        root = self._Node(None)
        root.fail = root
        # Phase 1: insert every term into the trie.
        for keyword in self.terms :
            current_node = root
            for ch in keyword :
                new_node = None
                for transition in current_node.transitions :
                    if transition.char == ch :
                        new_node = transition
                        break
                if new_node is None :
                    new_node = self._Node(ch)
                    current_node.transitions.append(new_node)
                    if current_node is root :
                        new_node.fail = root
                current_node = new_node
            current_node.results.append(keyword)
        # Phase 2: breadth-first pass assigning each node's failure link to
        # the longest proper suffix of its path that is also in the trie.
        queue = deque([root])
        while queue :
            current_node = queue.popleft()
            for node in current_node.transitions :
                queue.append(node)
                fail_state_node = current_node.fail
                while not any(x for x in fail_state_node.transitions if node.char == x.char) and fail_state_node is not root :
                    fail_state_node = fail_state_node.fail
                node.fail = next((x for x in fail_state_node.transitions if node.char == x.char and x is not node), root)
        self.root = root

    def search(self, text) :
        """Return every term occurrence in `text`, in scan order."""
        hits = []
        current = self.root
        for ch in text :
            # Follow failure links until a transition on ch exists (or we
            # bottom out at the root, which consumes the character).
            trans = None
            while trans is None :
                for candidate in current.transitions :
                    if candidate.char == ch :
                        trans = candidate
                        break
                if current is self.root :
                    break
                if trans is None :
                    current = current.fail
            if trans is not None :
                current = trans
            # Emit results at this node AND along its failure chain so that
            # suffix terms are reported too (the fix described above).
            node = current
            while node is not self.root :
                for result in node.results :
                    hits.append(result)
                node = node.fail
        return hits
# Load list of tagged entities
loadCollections()
print 'Loaded collections'

# Build the Aho-Corasick matcher once; search() is then cheap per profile.
print 'Creating search tree'
tree = searchTree()
for term in searchTerms :
    tree.add(term.lower())
tree.make()

# Send notification to the gmail account that the server is running
sendEmail("HerRadar is now activated", "")
# Main polling loop: log in, scrape the search results, track new profiles,
# diff tracked profiles against their cached text, and alert on keyword hits.
while 1 == 1 :
    # Create boolean to determine if a save is necessary
    saveNeeded = False
    print 'Opening browser'
    # Create the browser and begin accessing sites
    browser = Browser()
    # Log in to OkCupid
    browser.open("http://www.okcupid.com/login")
    # New fix to select the correct form
    formcount=0
    for f in browser.forms():
        if str(f.attrs["id"])=="loginbox_form":
            break
        formcount=formcount+1
    browser.select_form(nr=formcount)
    browser['username']=okCupidUsername
    browser['password']=okCupidPassword
    browser.submit()
    # Access search page for OKC
    response = browser.open(searchURL)
    content = response.read()
    # Pull user html tags from OKCupid data
    soup = BeautifulSoup(content, "lxml")
    #soup = BeautifulSoup(content)
    nameTags = soup.find_all(attrs={"class": "name"})
    print 'Extracting entity list'
    # Strip the usernames from the tags
    collection = []
    for entity in nameTags :
        collection.append(entity.string.encode('ascii', 'ignore'))
    # Print list of entities seen
    print 'Entities found ', collection
    # Get Current Date
    today = date.today()
    # Look for new entities
    print 'Looking for new entities'
    for entity in collection :
        if (tagged.count(entity) == 0) :
            tagged.append(entity)
            if (not entity in tracking) :
                # Add entity info to the tracking list
                try :
                    entityAge = soup.find(id='usr-'+entity).find(attrs={"class": "age"}).string.encode('ascii', 'ignore')
                except KeyError :
                    entityAge = 'ERROR'
                    print "There was a keyError generating entityAge for " + entity
                try :
                    entityImage = soup.find(id='usr-'+entity).a['data-image-url'].encode('ascii', 'ignore')
                except KeyError :
                    entityImage = 'ERROR'
                    print "There was a keyError generating entityImage for " + entity
                entityProfile = 'http://okcupid.com/profile/' + entity
                # Save entity info to tracking list
                print 'Entity: ' + entity + ', added to tracking'
                tracking[entity] = [today, "", entityAge, entityImage, entityProfile]
                # Update boolean so lists are saved after this cycle
                saveNeeded = True
    # TODO 2 : do I want to check tracked entities now? or more / less frequently?
    # Remove old entries
    print 'Removing expired entities from tracking'
    expired = []
    for entity in tracking :
        entityDate = tracking[entity][0]
        # Remove entities from tracking if over age
        if (today - entityDate).days > trackingAge :
            expired.append(entity)
    for entity in expired :
        print 'Removing Entity: ' + entity
        del tracking[entity]
    # If any entities are expired, update boolean so lists are saved after this cycle
    if len(expired) > 0 :
        saveNeeded = True
    # Check tracked entities
    print 'Checking tracked entities for changes'
    for entity in tracking :
        # NOTE(review): `tuple` shadows the builtin of the same name.
        tuple = tracking[entity]
        # Remove entities from tracking if over age
        # NOTE(review): this branch deletes from `tracking` while iterating it,
        # which raises RuntimeError; it also appears unreachable since expired
        # entries were already purged in the pass above - confirm and remove.
        if (today - tuple[0]).days > trackingAge :
            print 'Entity: ' + entity + ', removed from tracking'
            del tracking[entity]
            # Update boolean so lists are saved after this cycle
            saveNeeded = True
        # Check pre-existing entities for changes to profile
        else :
            response = browser.open('http://okcupid.com/profile/' + entity)
            content = response.read()
            # NOTE(review): no explicit parser here, unlike the "lxml" call
            # above - BeautifulSoup will pick a default and warn.
            soup = BeautifulSoup(content)
            # Iterate through each profile section and pull out text; try/catch block for sections that don't exist
            profile = ''
            for i in range(10) :
                essayID = 'essay_text_' + str(i)
                essayContent = soup.find(id=essayID)
                try :
                    profile += essayContent.text.encode('ascii', 'ignore')
                except AttributeError :
                    profile += ''
            # Check for changes to the profile
            if profile != tuple[1] :
                print 'Entity: ' + entity + ', has changes to profile'
                # Search for keywords
                results = []
                resultString = ''
                for result in tree.search(profile.lower()) :
                    results.append(result)
                    resultString += result + ', '
                '''
                if acSearch :
                    results.append(profile[result[0], result[1]])
                    resultString += profile[result[0], result[1]] + ', '
                else :
                    results.append(result)
                    resultString += result + ', '
                '''
                # Check for matches
                if len(results) > 0 :
                    # Debug
                    print 'Entity: ' + entity + ' has matched terms: ', results
                    # Report a match!
                    msg = """The following terms were found: <br>
                    """ + resultString + """ <br>
                    For User: """ + entity + """ <br>
                    Age: """ + tuple[2] + """ <br>
                    <img src='""" + tuple[3] + """'><br>
                    <a href='""" + tuple[4] + """'>Profile</a>
                    <br> <br>
                    This message means that either the entity just added these terms to their profile or has just updated it.
                    """
                    sendEmail('Tracking result for ' + entity, msg)
                # Update stored profile
                tuple[1] = profile
                # Update boolean so lists are saved after this cycle
                saveNeeded = True
    browser.close()
    # Save the tagged list of entities
    if (saveNeeded) :
        print "Saving collections"
        saveCollections()
        print "Collections saved"
    # see if it's time to send a heartbeat
    time = datetime.now().time()
    # NOTE(review): `&` binds tighter than `<`, so this evaluates
    # (12 & time.minute) < 10, not "hour is 12 and minute < 10" - should be
    # `time.hour == 12 and time.minute < 10`.
    if time.hour == 12 & time.minute < 10 :
        sendEmail('herRadar is active', 'thump')
    # Wait and repeat
    sleep(random.randint(int(sleepInterval-(sleepVariation*sleepInterval)), int(sleepInterval+(sleepVariation*sleepInterval))))
| mit |
barbour-em/osf.io | scripts/analytics/tabulate_emails.py | 30 | 1299 | # -*- coding: utf-8 -*-
"""Scripts for counting recently added users by email domain; pushes results
to the specified project.
"""
import datetime
import collections
from cStringIO import StringIO
from framework.mongo import database
from website import models
from website.app import app, init_app
from scripts.analytics import utils
from scripts.analytics import settings
def get_emails(query=None):
    """Tally registered usernames by email domain.

    :param query: optional Mongo filter applied to the ``user`` collection.
    :return: list of ``(domain, count)`` pairs, most common first.
    """
    cursor = database['user'].find(query, {'username': True})
    tally = collections.Counter()
    for record in cursor:
        # Everything after the last '@' is treated as the domain.
        tally[record['username'].rsplit('@', 1)[-1]] += 1
    return tally.most_common()
def get_emails_since(delta):
    """Tally email domains for real users confirmed within the last *delta*.

    Only counts registered, unmerged accounts that have a password set.
    """
    cutoff = datetime.datetime.utcnow() - delta
    recent_users_query = {
        'is_registered': True,
        'password': {'$ne': None},
        'is_merged': {'$ne': True},
        'date_confirmed': {'$gte': cutoff},
    }
    return get_emails(recent_users_query)
def main():
    # Report is attached to this project node, posted as this user.
    node = models.Node.load(settings.TABULATE_EMAILS_NODE_ID)
    user = models.User.load(settings.TABULATE_EMAILS_USER_ID)
    # Domain counts for users confirmed within the configured window.
    emails = get_emails_since(settings.TABULATE_EMAILS_TIME_DELTA)
    # Render the (domain, count) pairs as CSV in memory, then upload.
    sio = StringIO()
    utils.make_csv(sio, emails, ['affiliation', 'count'])
    utils.send_file(app, settings.TABULATE_EMAILS_FILE_NAME, settings.TABULATE_EMAILS_CONTENT_TYPE, sio, node, user)
if __name__ == '__main__':
    # Initialize the full application context (DB, models) before querying.
    init_app()
    main()
| apache-2.0 |
Xykon/pycom-micropython-sigfox | tests/basics/string_format.py | 47 | 1676 | # basic functionality test for {} format string
def test(fmt, *args):
print('{:8s}'.format(fmt) + '>' + fmt.format(*args) + '<')
# Literal brace escaping
test("}}{{")
# Automatic and explicit positional argument numbering
test("{}-{}", 1, [4, 5])
test("{0}-{1}", 1, [4, 5])
test("{1}-{0}", 1, [4, 5])
# Conversion flags (!r / !s) and presentation types
test("{:x}", 1)
test("{!r}", 2)
test("{:x}", 0x10)
test("{!r}", "foo")
test("{!s}", "foo")
test("{0!r:>10s} {0!s:>10s}", "foo")
# Integer presentation types with a minimum field width
test("{:4b}", 10)
test("{:4c}", 48)
test("{:4d}", 123)
test("{:4n}", 123)
test("{:4o}", 123)
test("{:4x}", 123)
test("{:4X}", 123)
test("{:4,d}", 12345678)
# Alternate form ('#') prefixes, including the zero edge case
test("{:#4b}", 10)
test("{:#4o}", 123)
test("{:#4x}", 123)
test("{:#4X}", 123)
test("{:#4d}", 0)
test("{:#4b}", 0)
test("{:#4o}", 0)
test("{:#4x}", 0)
test("{:#4X}", 0)
# String alignment and precision (truncation)
test("{:<6s}", "ab")
test("{:>6s}", "ab")
test("{:^6s}", "ab")
test("{:.1s}", "ab")
# Fill/align/sign combinations on signed integers
test("{: <6d}", 123)
test("{: <6d}", -123)
test("{:0<6d}", 123)
test("{:0<6d}", -123)
test("{:@<6d}", 123)
test("{:@<6d}", -123)
test("{:@< 6d}", 123)
test("{:@< 6d}", -123)
test("{:@<+6d}", 123)
test("{:@<+6d}", -123)
test("{:@<-6d}", 123)
test("{:@<-6d}", -123)
test("{:@>6d}", -123)
test("{:@<6d}", -123)
test("{:@=6d}", -123)
test("{:06d}", -123)
# Default presentation type with width only
test("{:>20}", "foo")
test("{:^20}", "foo")
test("{:<20}", "foo")
# nested format specifiers
print("{:{}}".format(123, '#>10'))
print("{:{}{}{}}".format(123, '#', '>', '10'))
print("{0:{1}{2}}".format(123, '#>', '10'))
print("{text:{align}{width}}".format(text="foo", align="<", width=20))
print("{text:{align}{width}}".format(text="foo", align="^", width=10))
print("{text:{align}{width}}".format(text="foo", align=">", width=30))
# Keyword arguments, alone and mixed with positional ones
print("{foo}/foo".format(foo="bar"))
print("{}".format(123, foo="bar"))
print("{}-{foo}".format(123, foo="bar"))
| mit |
JoeJasinski/evesch | evesch/core/feed/views.py | 1 | 8264 | from icalendar import Calendar, vCalAddress, vText
import icalendar
from datetime import timedelta
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
#from django.contrib.syndication.views import feed
from django.utils import feedgenerator
from django.template.loader import render_to_string
from django.http import HttpResponse
from evesch.org.models import Organization
from evesch.event.models import Event
from evesch.core.feed.feeds import OrgFeed
from evesch.euser.models import eUser, get_current_user
def org_rss(request,org_short_name,org_feed_hash):
    # Serve an RSS 2.0 feed of an organization's events.  Access is guarded
    # by the org's secret feed hash; a wrong hash redirects to the org page.
    try:
        """
        """
        host = request.META['HTTP_HOST']
        current_org, message = Organization.objects.get_current_org(org_short_name)
        if message:
            # Lookup failed -- bounce to the organization list.
            return HttpResponseRedirect(reverse('org_orgs_list'))
        if not org_feed_hash == current_org.org_feed_hash:
            # Bad hash: show the public org page instead of leaking the feed.
            return HttpResponseRedirect(reverse('org_org_view', kwargs={'org_short_name':current_org.org_short_name}))
        events = current_org.event_set.all().order_by('-event_date')
        orgfeed = feedgenerator.Rss201rev2Feed(title=current_org.org_name,
            link="http://%s%s" % (host, reverse('event_events_list',kwargs={'org_short_name':current_org.org_short_name,})),
            description=current_org.org_desc, language='en',
            )
        # One RSS item per event, newest first (per the ordering above).
        for event in events:
            orgfeed.add_item(
                title=event.event_name,
                link="http://%s%s" % (host, reverse('event_event_view', kwargs={'org_short_name':current_org.org_short_name,'event_hash':event.event_hash})),
                description="Event on: %s -- Description: %s" % (event.event_date.strftime('%d %b %Y'), event.event_desc),
                categories=(event.event_type,),
                author_name=event.event_creator_name,
                pubdate=event.event_created_date)
        response = HttpResponse()
        response['Content-Type'] = 'application/rss+xml'
        response.write(orgfeed.writeString('UTF-8'))
        #template_name = "error.html"
        return response
    except ObjectDoesNotExist:
        context = {'error':"Organization does not exist",}
        template_name = "error.html"
        return render_to_response(template_name,context,context_instance=RequestContext(request))
def org_ics(request, org_short_name, org_feed_hash):
    """Serve an iCalendar (.ics) feed of all events for an organization.

    The request must carry the organization's secret feed hash; otherwise
    the client is redirected to the org's public page instead of the feed.
    """
    host = request.META['HTTP_HOST']
    current_org, message = Organization.objects.get_current_org(org_short_name)
    if message:
        # Lookup failed -- bounce to the organization list.
        return HttpResponseRedirect(reverse('org_orgs_list'))
    if not org_feed_hash == current_org.org_feed_hash:
        # Bad hash: show the public org page instead of leaking the calendar.
        return HttpResponseRedirect(reverse('org_org_view', kwargs={'org_short_name': current_org.org_short_name}))
    events = current_org.event_set.all().order_by('-event_date')
    orgical = Calendar()
    orgical['summary'] = "Calendar for organization %s" % (current_org.org_name)
    orgical.add('prodid', '-//Evesch//NONSGML v1.0//EN')
    orgical.add('version', '2.0')
    for event in events:
        cal_event = icalendar.Event()
        cal_event.add('summary', event.event_name)
        cal_event.add('dtstart', event.event_date)
        cal_event.add('description', event.event_desc)
        cal_event.add('categories', event.event_type)
        # Events store no end time; advertise a nominal one-hour slot.
        cal_event.add('duration', timedelta(hours=1))
        cal_event.add('url', "http://%s%s" % (host, reverse('event_event_view', kwargs={'org_short_name': current_org.org_short_name, 'event_hash': event.event_hash})))
        # Prefer the creator's email for the ORGANIZER line; fall back to
        # their display name when no email is on file.
        if event.event_creator_name.email:
            organizer_n = event.event_creator_name.email
        else:
            organizer_n = "%s %s" % (event.event_creator_name.first_name, event.event_creator_name.last_name)
        organizer = vCalAddress('MAILTO:' + organizer_n)
        organizer.params['cn'] = vText("%s %s" % (event.event_creator_name.first_name, event.event_creator_name.last_name))
        organizer.params['role'] = vText('CREATOR')
        cal_event.add('organizer', organizer, encode=0)
        orgical.add_component(cal_event)
    # Removed dead code: unused template_name/context locals that were never
    # referenced before the HttpResponse was returned.
    response = HttpResponse()
    response['Content-Type'] = 'text/calendar'
    response.write(orgical.to_ical())
    return response
def user_rss(request,username,user_feed_hash):
    try:
        """ """
        # Serve an RSS 2.0 feed of every event this user is attending,
        # guarded by the user's secret feed hash.
        host = request.META['HTTP_HOST']
        current_user, message = get_current_user(username)
        if message:
            # Unknown user -- fall back to the home page.
            return HttpResponseRedirect(reverse('home'))
        if not user_feed_hash == current_user.user_feed_hash:
            # Bad hash: show the public profile instead of leaking the feed.
            return HttpResponseRedirect(reverse('euser_user_view', kwargs={'username':current_user.username}))
        user_events = Event.objects.filter(attendee__in=current_user.attendee_set.all()).order_by('-event_date')
        orgfeed = feedgenerator.Rss201rev2Feed(title=current_user.username,
            link="http://%s%s" % (host, reverse('euser_user_view', kwargs={'username':current_user.username})) ,
            description=current_user.about, language='en',
            )
        # One RSS item per attended event, newest first.
        for event in user_events:
            orgfeed.add_item(
                title=event.event_name,
                link="http://%s%s" % (host, reverse('event_event_view', kwargs={'org_short_name':event.event_org.org_short_name,'event_hash':event.event_hash})),
                description="Event on: %s -- Description: %s" % (event.event_date.strftime('%d %b %Y'), event.event_desc),
                categories=(event.event_type,),
                author_name=event.event_creator_name,
                pubdate=event.event_created_date)
        response = HttpResponse()
        response['Content-Type'] = 'application/rss+xml'
        response.write(orgfeed.writeString('UTF-8'))
        #template_name = "error.html"
        return response
    except ObjectDoesNotExist:
        context = {'error':"Organization does not exist",}
        template_name = "error.html"
        return render_to_response(template_name,context,context_instance=RequestContext(request))
def user_ics(request, username, user_feed_hash):
    """Serve an iCalendar (.ics) feed of every event the user is attending.

    Access is guarded by the user's secret feed hash; a wrong hash
    redirects to the user's public page instead of serving the calendar.
    """
    host = request.META['HTTP_HOST']
    current_user, message = get_current_user(username)
    if message:
        # Unknown user -- fall back to the home page.
        return HttpResponseRedirect(reverse('home'))
    if not user_feed_hash == current_user.user_feed_hash:
        return HttpResponseRedirect(reverse('euser_user_view', kwargs={'username': current_user.username}))
    user_events = Event.objects.filter(attendee__in=current_user.attendee_set.all()).order_by('-event_date')
    userical = Calendar()
    userical['summary'] = "Calendar for user %s" % (current_user.username)
    userical.add('prodid', '-//Evesch//NONSGML v1.0//EN')
    userical.add('version', '2.0')
    for event in user_events:
        cal_event = icalendar.Event()
        cal_event.add('summary', event.event_name)
        cal_event.add('dtstart', event.event_date)
        cal_event.add('description', event.event_desc)
        cal_event.add('categories', event.event_type)
        # Events store no end time; advertise a nominal one-hour slot.
        cal_event.add('duration', timedelta(hours=1))
        cal_event.add('url', "http://%s%s" % (host, reverse('event_event_view', kwargs={'org_short_name': event.event_org.org_short_name, 'event_hash': event.event_hash})))
        # Prefer the creator's email for the ORGANIZER line; fall back to
        # their display name when no email is on file.
        if event.event_creator_name.email:
            organizer_n = event.event_creator_name.email
        else:
            organizer_n = "%s %s" % (event.event_creator_name.first_name, event.event_creator_name.last_name)
        organizer = vCalAddress('MAILTO:' + organizer_n)
        organizer.params['cn'] = vText("%s %s" % (event.event_creator_name.first_name, event.event_creator_name.last_name))
        organizer.params['role'] = vText('CREATOR')
        cal_event.add('organizer', organizer, encode=0)
        userical.add_component(cal_event)
    # Removed dead code: unused template_name/context locals.
    response = HttpResponse()
    response['Content-Type'] = 'text/calendar'
    # Bug fix: Calendar.as_string() is the long-deprecated/removed icalendar
    # API; use to_ical(), matching org_ics() above.
    response.write(userical.to_ical())
    return response
| gpl-2.0 |
ESOedX/edx-platform | common/lib/xmodule/xmodule/progress.py | 2 | 4089 | '''
Progress class for modules. Represents where a student is in a module.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
from __future__ import absolute_import
import numbers
class Progress(object):
    '''Represents a progress of a/b (a out of b done)

    a and b must be numeric, but not necessarily integer, with
    0 <= a <= b and b > 0.

    Progress can only represent Progress for modules where that makes sense.
    Other modules (e.g. html) should return None from get_progress().

    TODO: add tag for module type? Would allow for smarter merging.
    '''

    def __init__(self, a, b):
        '''Construct a Progress object from numerator *a* and denominator *b*.

        Both must be numbers; *a* is clamped into [0, b], and b must be > 0.
        '''
        # Validate eagerly so every other method can trust the invariants.
        if not (isinstance(a, numbers.Number) and
                isinstance(b, numbers.Number)):
            raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
        # Clamp the numerator into [0, b] rather than rejecting it.
        a = min(a, b)
        a = max(a, 0)
        if b <= 0:
            raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
        self._a = a
        self._b = b

    def frac(self):
        ''' Return tuple (a,b) representing progress of a/b'''
        return (self._a, self._b)

    def percent(self):
        ''' Returns a percentage progress as a float between 0 and 100.

        subclassing note: implemented in terms of frac().
        '''
        done, total = self.frac()
        return 100.0 * done / total

    def started(self):
        ''' Returns True if fractional progress is greater than 0.

        subclassing note: implemented in terms of frac().
        '''
        done, _ = self.frac()
        return done > 0

    def inprogress(self):
        ''' Returns True if fractional progress is strictly between 0 and 1.

        subclassing note: implemented in terms of frac().
        '''
        done, total = self.frac()
        return 0 < done < total

    def done(self):
        ''' Return True if this represents done.

        subclassing note: implemented in terms of frac().
        '''
        done, total = self.frac()
        return done == total

    def ternary_str(self):
        ''' Return a string version of this progress: either
        "none", "in_progress", or "done".

        subclassing note: implemented in terms of frac()
        '''
        done, total = self.frac()
        if done <= 0:
            return "none"
        return "in_progress" if done < total else "done"

    def __eq__(self, other):
        ''' Two Progress objects are equal if they have identical values.
        Implemented in terms of frac()'''
        return isinstance(other, Progress) and self.frac() == other.frac()

    def __ne__(self, other):
        ''' The opposite of equal'''
        return not self.__eq__(other)

    def __str__(self):
        '''Return "a/b", rounding each part to two decimal places and
        stripping trailing zeroes.

        subclassing note: implemented in terms of frac().
        '''
        def display(n):
            # '2.00' -> '2', '0.50' -> '0.5'
            return '{:.2f}'.format(n).rstrip('0').rstrip('.')
        done, total = self.frac()
        return "{0}/{1}".format(display(done), display(total))

    @staticmethod
    def add_counts(a, b):
        '''Add two progress indicators, assuming that each represents items done:
        (a / b) + (c / d) = (a + c) / (b + d).
        If either is None, returns the other.
        '''
        if a is None or b is None:
            return b if a is None else a
        n1, d1 = a.frac()
        n2, d2 = b.frac()
        return Progress(n1 + n2, d1 + d2)
| agpl-3.0 |
Minghi/shadowsocks | shadowsocks/daemon.py | 694 | 5602 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
    # Dispatch on the optional 'daemon' config key: start/stop/restart this
    # process as a Unix daemon.  A no-op when the key is absent.
    if 'daemon' in config:
        if os.name != 'posix':
            raise Exception('daemon mode is only supported on Unix')
        command = config['daemon']
        if not command:
            # An empty value defaults to starting the daemon.
            command = 'start'
        pid_file = config['pid-file']
        log_file = config['log-file']
        if command == 'start':
            daemon_start(pid_file, log_file)
        elif command == 'stop':
            daemon_stop(pid_file)
            # always exit after daemon_stop
            sys.exit(0)
        elif command == 'restart':
            daemon_stop(pid_file)
            daemon_start(pid_file, log_file)
        else:
            raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
    # Record *pid* in *pid_file*, holding an advisory lock on the file so a
    # second daemon instance can detect (and refuse) a concurrent start.
    # Returns 0 on success, -1 when the file is unwritable or already locked.
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    # Keep the lock fd out of child processes spawned later.
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock is held by a running daemon -- report its pid if readable.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    # NOTE: the fd is deliberately left open so the lock survives for the
    # lifetime of the daemon process.
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
def freopen(f, mode, stream):
    """Re-point *stream*'s OS-level file descriptor at the file *f*.

    Mirrors C's freopen(): after this call, I/O performed through *stream*
    goes to a descriptor opened on *f* instead of its original target.
    """
    replacement = open(f, mode)
    target_fd = stream.fileno()
    os.close(target_fd)
    # dup2 makes target_fd a duplicate of the freshly opened descriptor, so
    # the stream object keeps working with its original fd number.
    os.dup2(replacement.fileno(), target_fd)
def daemon_start(pid_file, log_file):
    # Daemonize the current process: fork, detach from the controlling
    # terminal, write the pid file, and redirect stdout/stderr to log_file.

    def handle_exit(signum, _):
        # Parent's signal handler: SIGTERM from the child means success.
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child (child signals back within 5 seconds)
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        # Another instance holds the pid-file lock; abort both processes.
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    # Detach from the terminal: close stdin, send stdout/stderr to the log.
    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1)
def daemon_stop(pid_file):
    """Stop the daemon whose pid is recorded in *pid_file*.

    Sends SIGTERM, waits up to ~10 seconds for the process to disappear,
    then removes the pid file.  Returns normally when the daemon is provably
    not running; exits the process with status 1 on unexpected errors.
    """
    import errno
    try:
        with open(pid_file) as f:
            buf = f.read()
            pid = common.to_str(buf)
            if not buf:
                logging.error('not running')
                # Bug fix: the original fell through to int('') and crashed
                # with ValueError.  An empty pid file means the daemon is not
                # running, so return (exit 0) like the ENOENT branch below.
                return
    except IOError as e:
        shell.print_exception(e)
        if e.errno == errno.ENOENT:
            # always exit 0 if we are sure daemon is not running
            logging.error('not running')
            return
        sys.exit(1)
    pid = int(pid)
    if pid > 0:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            if e.errno == errno.ESRCH:
                logging.error('not running')
                # always exit 0 if we are sure daemon is not running
                return
            shell.print_exception(e)
            sys.exit(1)
    else:
        logging.error('pid is not positive: %d', pid)

    # sleep for maximum 10s while polling until the process is gone
    for i in range(0, 200):
        try:
            # signal 0 only queries whether the pid still exists
            os.kill(pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                break
        time.sleep(0.05)
    else:
        logging.error('timed out when stopping pid %d', pid)
        sys.exit(1)
    print('stopped')
    os.unlink(pid_file)
def set_user(username):
    # Drop privileges to *username* (both gid and uid).  A no-op when the
    # target user is already the current user or username is None.
    if username is None:
        return

    import pwd
    import grp

    try:
        pwrec = pwd.getpwnam(username)
    except KeyError:
        logging.error('user not found: %s' % username)
        raise
    user = pwrec[0]
    uid = pwrec[2]
    gid = pwrec[3]

    cur_uid = os.getuid()
    if uid == cur_uid:
        return
    if cur_uid != 0:
        logging.error('can not set user as nonroot user')
        # will raise later

    # inspired by supervisor
    if hasattr(os, 'setgroups'):
        # Join every supplementary group the target user belongs to.
        groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
        groups.insert(0, gid)
        os.setgroups(groups)
    # Group must be changed before uid, while we still have privileges.
    os.setgid(gid)
    os.setuid(uid)
| apache-2.0 |
fanjunwei/depot_tools | third_party/boto/s3/key.py | 51 | 73164 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import mimetypes
import os
import re
import rfc822
import StringIO
import base64
import binascii
import math
import urllib
import boto.utils
from boto.exception import BotoClientError
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5
try:
from hashlib import md5
except ImportError:
from md5 import md5
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = 8192
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
    def __init__(self, bucket=None, name=None):
        # Parent bucket object and the key's name within that bucket.
        self.bucket = bucket
        self.name = name
        # User metadata (x-amz-meta-*) as a plain dict.
        self.metadata = {}
        # Standard HTTP object headers, populated on GET/HEAD.
        self.cache_control = None
        self.content_type = self.DefaultContentType
        self.content_encoding = None
        self.content_disposition = None
        self.content_language = None
        # Local filename this key was set from, if any.
        self.filename = None
        self.etag = None
        self.is_latest = False
        self.last_modified = None
        self.owner = None
        self.storage_class = 'STANDARD'
        # MD5 of the contents, as hex digest and as base64 (for Content-MD5).
        self.md5 = None
        self.base64md5 = None
        self.path = None
        # In-flight HTTP response and read/write mode for open()/read().
        self.resp = None
        self.mode = None
        self.size = None
        # Versioning-related state (populated from response headers).
        self.version_id = None
        self.source_version_id = None
        self.delete_marker = False
        self.encrypted = None
        # If the object is being restored, this attribute will be set to True.
        # If the object is restored, it will be set to False.  Otherwise this
        # value will be None. If the restore is completed (ongoing_restore =
        # False), the expiry_date will be populated with the expiry date of
        # the restored object.
        self.ongoing_restore = None
        self.expiry_date = None
def __repr__(self):
if self.bucket:
return '<Key: %s,%s>' % (self.bucket.name, self.name)
else:
return '<Key: None,%s>' % self.name
def __getattr__(self, name):
if name == 'key':
return self.name
else:
raise AttributeError
def __setattr__(self, name, value):
if name == 'key':
self.__dict__['name'] = value
else:
self.__dict__[name] = value
    def __iter__(self):
        # The key is its own iterator; next() streams successive chunks.
        return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
    def get_md5_from_hexdigest(self, md5_hexdigest):
        """
        A utility function to create the 2-tuple (md5hexdigest, base64md5)
        from just having a precalculated md5_hexdigest.
        """
        digest = binascii.unhexlify(md5_hexdigest)
        # NOTE: base64.encodestring is the Python 2 API (removed in 3.9);
        # this library targets Python 2.
        base64md5 = base64.encodestring(digest)
        # encodestring appends a trailing newline; strip it for header use.
        if base64md5[-1] == '\n':
            base64md5 = base64md5[0:-1]
        return (md5_hexdigest, base64md5)
    def handle_encryption_headers(self, resp):
        # Record the server-side-encryption header value (or None) from resp.
        provider = self.bucket.connection.provider
        if provider.server_side_encryption_header:
            self.encrypted = resp.getheader(provider.server_side_encryption_header, None)
        else:
            # Provider does not support SSE at all.
            self.encrypted = None
    def handle_version_headers(self, resp, force=False):
        # Populate version_id / source_version_id / delete_marker from the
        # provider-specific versioning headers on *resp*.
        provider = self.bucket.connection.provider
        # If the Key object already has a version_id attribute value, it
        # means that it represents an explicit version and the user is
        # doing a get_contents_*(version_id=<foo>) to retrieve another
        # version of the Key.  In that case, we don't really want to
        # overwrite the version_id in this Key object.  Comprende?
        if self.version_id is None or force:
            self.version_id = resp.getheader(provider.version_id, None)
        self.source_version_id = resp.getheader(provider.copy_source_version_id,
                                                None)
        if resp.getheader(provider.delete_marker, 'false') == 'true':
            self.delete_marker = True
        else:
            self.delete_marker = False
def handle_restore_headers(self, response):
header = response.getheader('x-amz-restore')
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
    def open_read(self, headers=None, query_args='',
                  override_num_retries=None, response_headers=None):
        """
        Open this key for reading

        :type headers: dict
        :param headers: Headers to pass in the web request

        :type query_args: string
        :param query_args: Arguments to pass in the query string
            (ie, 'torrent')

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response. See
            http://goo.gl/EWOPb for details.
        """
        # Idempotent: if a response is already open, do nothing.
        if self.resp == None:
            self.mode = 'r'

            provider = self.bucket.connection.provider
            self.resp = self.bucket.connection.make_request(
                'GET', self.bucket.name, self.name, headers,
                query_args=query_args,
                override_num_retries=override_num_retries)
            if self.resp.status < 199 or self.resp.status > 299:
                body = self.resp.read()
                raise provider.storage_response_error(self.resp.status,
                                                      self.resp.reason, body)
            response_headers = self.resp.msg
            self.metadata = boto.utils.get_aws_metadata(response_headers,
                                                        provider)
            # Mirror the standard object headers onto this Key's attributes.
            for name, value in response_headers.items():
                # To get correct size for Range GETs, use Content-Range
                # header if one was returned. If not, use Content-Length
                # header.
                if (name.lower() == 'content-length' and
                        'Content-Range' not in response_headers):
                    self.size = int(value)
                elif name.lower() == 'content-range':
                    # Total size is the part after '/' in "bytes a-b/total".
                    end_range = re.sub('.*/(.*)', '\\1', value)
                    self.size = int(end_range)
                elif name.lower() == 'etag':
                    self.etag = value
                elif name.lower() == 'content-type':
                    self.content_type = value
                elif name.lower() == 'content-encoding':
                    self.content_encoding = value
                elif name.lower() == 'content-language':
                    self.content_language = value
                elif name.lower() == 'last-modified':
                    self.last_modified = value
                elif name.lower() == 'cache-control':
                    self.cache_control = value
                elif name.lower() == 'content-disposition':
                    self.content_disposition = value
            self.handle_version_headers(self.resp)
            self.handle_encryption_headers(self.resp)
    def open_write(self, headers=None, override_num_retries=None):
        """
        Open this key for writing.
        Not yet implemented

        :type headers: dict
        :param headers: Headers to pass in the write request

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
            num_retries parameter for underlying PUT.
        """
        # Streaming writes are not supported; uploads go through the
        # set_contents_* family instead.
        raise BotoClientError('Not Implemented')
    def open(self, mode='r', headers=None, query_args=None,
             override_num_retries=None):
        # Dispatch to open_read()/open_write() based on *mode* ('r' or 'w').
        if mode == 'r':
            self.mode = 'r'
            self.open_read(headers=headers, query_args=query_args,
                           override_num_retries=override_num_retries)
        elif mode == 'w':
            self.mode = 'w'
            # Note: open_write currently always raises (not implemented).
            self.open_write(headers=headers,
                            override_num_retries=override_num_retries)
        else:
            raise BotoClientError('Invalid mode: %s' % mode)
    # Whether close() has been called on this key's open response.
    closed = False

    def close(self, fast=False):
        """
        Close this key.

        :type fast: bool
        :param fast: True if you want the connection to be closed without first
        reading the content. This should only be used in cases where subsequent
        calls don't need to return the content from the open HTTP connection.
        Note: As explained at
        http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
        callers must read the whole response before sending a new request to the
        server. Calling Key.close(fast=True) and making a subsequent request to
        the server will work because boto will get an httplib exception and
        close/reopen the connection.
        """
        # Drain the response so the underlying connection can be reused.
        if self.resp and not fast:
            self.resp.read()
        self.resp = None
        self.mode = None
        self.closed = True
    def next(self):
        """
        By providing a next method, the key object supports use as an iterator.
        For example, you can now say:

        for bytes in key:
            write bytes to a file or whatever

        All of the HTTP connection stuff is handled for you.
        """
        # Python 2 iterator protocol: open lazily, yield BufferSize chunks.
        self.open_read()
        data = self.resp.read(self.BufferSize)
        if not data:
            # Exhausted: release the connection before signalling the end.
            self.close()
            raise StopIteration
        return data
    def read(self, size=0):
        # File-like read: size=0 (default) reads the whole remaining body,
        # otherwise reads up to *size* bytes.  Opens the key lazily.
        self.open_read()
        if size == 0:
            data = self.resp.read()
        else:
            data = self.resp.read(size)
        if not data:
            # End of body: release the connection.
            self.close()
        return data
    def change_storage_class(self, new_storage_class, dst_bucket=None,
                             validate_dst_bucket=True):
        """
        Change the storage class of an existing key.
        Depending on whether a different destination bucket is supplied
        or not, this will either move the item within the bucket, preserving
        all metadata and ACL info bucket changing the storage class or it
        will copy the item to the provided destination bucket, also
        preserving metadata and ACL info.

        :type new_storage_class: string
        :param new_storage_class: The new storage class for the Key.
            Possible values are:
            * STANDARD
            * REDUCED_REDUNDANCY

        :type dst_bucket: string
        :param dst_bucket: The name of a destination bucket. If not
            provided the current bucket of the key will be used.

        :type validate_dst_bucket: bool
        :param validate_dst_bucket: If True, will validate the dst_bucket
            by using an extra list request.
        """
        # S3 has no in-place storage-class change; a self-copy with the
        # desired class (and preserved ACL) achieves the same effect.
        if new_storage_class == 'STANDARD':
            return self.copy(self.bucket.name, self.name,
                             reduced_redundancy=False, preserve_acl=True,
                             validate_dst_bucket=validate_dst_bucket)
        elif new_storage_class == 'REDUCED_REDUNDANCY':
            return self.copy(self.bucket.name, self.name,
                             reduced_redundancy=True, preserve_acl=True,
                             validate_dst_bucket=validate_dst_bucket)
        else:
            raise BotoClientError('Invalid storage class: %s' %
                                  new_storage_class)
    def copy(self, dst_bucket, dst_key, metadata=None,
             reduced_redundancy=False, preserve_acl=False,
             encrypt_key=False, validate_dst_bucket=True):
        """
        Copy this Key to another bucket.

        :type dst_bucket: string
        :param dst_bucket: The name of the destination bucket

        :type dst_key: string
        :param dst_key: The name of the destination key

        :type metadata: dict
        :param metadata: Metadata to be associated with new key. If
            metadata is supplied, it will replace the metadata of the
            source key being copied. If no metadata is supplied, the
            source key's metadata will be copied to the new key.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will force the
            storage class of the new Key to be REDUCED_REDUNDANCY
            regardless of the storage class of the key being copied.
            The Reduced Redundancy Storage (RRS) feature of S3,
            provides lower redundancy at lower storage cost.

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key will
            be copied to the destination key. If False, the
            destination key will have the default ACL. Note that
            preserving the ACL in the new key object will require two
            additional API calls to S3, one to retrieve the current
            ACL and one to set that ACL on the new object. If you
            don't care about the ACL, a value of False will be
            significantly more efficient.

        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.

        :type validate_dst_bucket: bool
        :param validate_dst_bucket: If True, will validate the dst_bucket
            by using an extra list request.

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        dst_bucket = self.bucket.connection.lookup(dst_bucket,
                                                   validate_dst_bucket)
        # RRS can be forced on; otherwise the source's class is kept.
        if reduced_redundancy:
            storage_class = 'REDUCED_REDUNDANCY'
        else:
            storage_class = self.storage_class
        # The actual server-side COPY is performed by the destination bucket.
        return dst_bucket.copy_key(dst_key, self.bucket.name,
                                   self.name, metadata,
                                   storage_class=storage_class,
                                   preserve_acl=preserve_acl,
                                   encrypt_key=encrypt_key)
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name))
def delete(self):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id)
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
self.metadata[name] = value
def update_metadata(self, d):
self.metadata.update(d)
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket != None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket != None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket != None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
headers = {'x-amz-website-redirect-location': redirect_location}
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
    def generate_url(self, expires_in, method='GET', headers=None,
                     query_auth=True, force_http=False, response_headers=None,
                     expires_in_absolute=False, version_id=None,
                     policy=None, reduced_redundancy=False, encrypt_key=False):
        """
        Generate a URL to access this key.
        :type expires_in: int
        :param expires_in: How long the url is valid for, in seconds
        :type method: string
        :param method: The method to use for retrieving the file
            (default is GET)
        :type headers: dict
        :param headers: Any headers to pass along in the request
        :type query_auth: bool
        :param query_auth: If True (the default), the generated URL
            includes query-string authentication parameters; if False
            the URL is unsigned.
        :type force_http: bool
        :param force_http: If True, http will be used instead of https.
        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP
            headers/values that will override any headers associated
            with the stored object in the response. See
            http://goo.gl/EWOPb for details.
        :type expires_in_absolute: bool
        :param expires_in_absolute: If True, ``expires_in`` is presumably
            treated as an absolute timestamp rather than a delta --
            confirm against the connection's generate_url implementation.
        :type version_id: string
        :param version_id: The version_id of the object to GET. If specified
            this overrides any value in the key.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.
        :rtype: string
        :return: The URL to access the key
        """
        provider = self.bucket.connection.provider
        # An explicit version_id argument wins over the key's own.
        version_id = version_id or self.version_id
        if headers is None:
            headers = {}
        else:
            # Copy so the headers added below don't leak back into the
            # caller's dict.
            headers = headers.copy()
        # add headers accordingly (usually PUT case)
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        # Fold the key's user metadata into the header set; merge_meta
        # presumably applies the provider's metadata prefix -- defined in
        # boto.utils, outside this file.
        headers = boto.utils.merge_meta(headers, self.metadata, provider)
        return self.bucket.connection.generate_url(expires_in, method,
                                                   self.bucket.name, self.name,
                                                   headers, query_auth,
                                                   force_http,
                                                   response_headers,
                                                   expires_in_absolute,
                                                   version_id)
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
point point at the offset from which you wish to upload.
ie. if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
pointer. Less bytes may be available.
"""
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
http_conn.putrequest(method, path)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
# Calculate all MD5 checksums on the fly, if not already computed
if not self.base64md5:
m = md5()
else:
m = None
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
if m:
m.update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
self.size = data_len
if m:
# Use the chunked trailer for the digest
hd = m.hexdigest()
self.md5, self.base64md5 = self.get_md5_from_hexdigest(hd)
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if ((response.status == 500 or response.status == 503 or
response.getheader('location')) and not chunked_transfer):
# we'll try again.
return response
elif response.status >= 200 and response.status <= 299:
self.etag = response.getheader('etag')
if self.etag != '"%s"' % self.md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5')
return response
else:
raise provider.storage_response_error(
response.status, response.reason, body)
if not headers:
headers = {}
else:
headers = headers.copy()
headers['User-Agent'] = UserAgent
if self.storage_class != 'STANDARD':
headers[provider.storage_class_header] = self.storage_class
if 'Content-Encoding' in headers:
self.content_encoding = headers['Content-Encoding']
if 'Content-Language' in headers:
self.content_encoding = headers['Content-Language']
if 'Content-Type' in headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if headers['Content-Type'] is None:
# Delete null Content-Type value to skip sending that header.
del headers['Content-Type']
else:
self.content_type = headers['Content-Type']
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type == None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers,
sender=sender,
query_args=query_args)
self.handle_version_headers(resp, force=True)
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
inplace into different parts. Less bytes may be available.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5
hash as the first element and the base64 encoded version
of the plain digest as the second element.
"""
tup = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = tup[2]
return tup[0:2]
    def set_contents_from_stream(self, fp, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None,
                                 reduced_redundancy=False, query_args=None,
                                 size=None):
        """
        Store an object using the name of the Key object as the key in
        cloud and the contents of the data stream pointed to by 'fp' as
        the contents.
        The stream object is not seekable and total size is not known.
        This has the implication that we can't specify the
        Content-Size and Content-MD5 in the header. So for huge
        uploads, the delay in calculating MD5 is avoided but with a
        penalty of inability to verify the integrity of the uploaded
        data.
        :type fp: file
        :param fp: the file whose contents are to be uploaded
        :type headers: dict
        :param headers: additional HTTP headers to be sent with the
            PUT request.
        :type replace: bool
        :param replace: If this parameter is False, the method will first check
            to see if an object exists in the bucket with the same key. If it
            does, it won't overwrite it. The default value is True which will
            overwrite the object.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing the
            total number of bytes that need to be transmitted.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter, this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type size: int
        :param size: (optional) The Maximum number of bytes to read from
            the file pointer (fp). This is useful when uploading a
            file in multiple parts where you are splitting the file up
            into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Less bytes may be available.
        """
        provider = self.bucket.connection.provider
        # Streaming requires chunked Transfer-Encoding; bail out early on
        # providers that can't do it.
        if not provider.supports_chunked_transfer():
            raise BotoClientError('%s does not support chunked transfer'
                % provider.get_provider_name())
        # Name of the Object should be specified explicitly for Streams.
        if not self.name or self.name == '':
            raise BotoClientError('Cannot determine the destination '
                                'object name for the given stream')
        if headers is None:
            headers = {}
        # NOTE(review): a caller-supplied headers dict is mutated in
        # place by the assignments below.
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
        # Without an associated bucket there is nowhere to send the
        # stream; silently do nothing (mirrors set_contents_from_file).
        if self.bucket != None:
            if not replace:
                # replace=False: leave an existing object untouched.
                if self.bucket.lookup(self.name):
                    return
            self.send_file(fp, headers, cb, num_cb, query_args,
                           chunked_transfer=True, size=size)
    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False, query_args=None,
                               encrypt_key=False, size=None, rewind=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file pointed to by 'fp' as the
        contents. The data is read from 'fp' from its current position until
        'size' bytes have been read or EOF.
        :type fp: file
        :param fp: the file whose contents to upload
        :type headers: dict
        :param headers: Additional HTTP headers that will be sent with
            the PUT request.
        :type replace: bool
        :param replace: If this parameter is False, the method will
            first check to see if an object exists in the bucket with
            the same key. If it does, it won't overwrite it. The
            default value is True which will overwrite the object.
        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two
            integer parameters, the first representing the number of
            bytes that have been successfully transmitted to S3 and
            the second representing the size of the to be transmitted
            object.
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
            cb parameter this parameter determines the granularity of
            the callback by defining the maximum number of times the
            callback will be called during the file transfer.
        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.
        :type md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason
            prior to upload, it's silly to have to do it twice so this
            param, if present, will be used as the MD5 values of the
            file. Otherwise, the checksum will be computed.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
            class of the new Key to be REDUCED_REDUNDANCY. The Reduced
            Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
        :param encrypt_key: If True, the new copy of the object will
            be encrypted on the server-side by S3 and will be stored
            in an encrypted form while at rest in S3.
        :type size: int
        :param size: (optional) The Maximum number of bytes to read
            from the file pointer (fp). This is useful when uploading
            a file in multiple parts where you are splitting the file
            up into different ranges to be uploaded. If not specified,
            the default behaviour is to read all bytes from the file
            pointer. Less bytes may be available.
        :type rewind: bool
        :param rewind: (optional) If True, the file pointer (fp) will
            be rewound to the start before any bytes are read from
            it. The default behaviour is False which reads from the
            current position of the file pointer (fp).
        :rtype: int
        :return: The number of bytes written to the key.
        """
        provider = self.bucket.connection.provider
        # NOTE(review): a caller-supplied headers dict is not copied; the
        # policy/encryption/storage-class entries below mutate it in place.
        headers = headers or {}
        if policy:
            headers[provider.acl_header] = policy
        if encrypt_key:
            headers[provider.server_side_encryption_header] = 'AES256'
        if rewind:
            # caller requests reading from beginning of fp.
            fp.seek(0, os.SEEK_SET)
        else:
            # The following seek/tell/seek logic is intended
            # to detect applications using the older interface to
            # set_contents_from_file(), which automatically rewound the
            # file each time the Key was reused. This changed with commit
            # 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
            # split into multiple parts and uploaded in parallel, and at
            # the time of that commit this check was added because otherwise
            # older programs would get a success status and upload an empty
            # object. Unfortuantely, it's very inefficient for fp's implemented
            # by KeyFile (used, for example, by gsutil when copying between
            # providers). So, we skip the check for the KeyFile case.
            # TODO: At some point consider removing this seek/tell/seek
            # logic, after enough time has passed that it's unlikely any
            # programs remain that assume the older auto-rewind interface.
            if not isinstance(fp, KeyFile):
                spos = fp.tell()
                fp.seek(0, os.SEEK_END)
                if fp.tell() == spos:
                    fp.seek(0, os.SEEK_SET)
                    if fp.tell() != spos:
                        # Raise an exception as this is likely a programming
                        # error whereby there is data before the fp but nothing
                        # after it.
                        fp.seek(spos)
                        raise AttributeError('fp is at EOF. Use rewind option '
                                             'or seek() to data start.')
                # seek back to the correct position.
                fp.seek(spos)
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced reduncancy?
                # What if different providers provide different classes?
        if hasattr(fp, 'name'):
            # Remember the source path so send_file can guess Content-Type.
            self.path = fp.name
        if self.bucket != None:
            if not md5 and provider.supports_chunked_transfer():
                # defer md5 calculation to on the fly and
                # we don't know anything about size yet.
                chunked_transfer = True
                self.size = None
            else:
                chunked_transfer = False
                if isinstance(fp, KeyFile):
                    # Avoid EOF seek for KeyFile case as it's very inefficient.
                    key = fp.getkey()
                    size = key.size - fp.tell()
                    self.size = size
                    # At present both GCS and S3 use MD5 for the etag for
                    # non-multipart-uploaded objects. If the etag is 32 hex
                    # chars use it as an MD5, to avoid having to read the file
                    # twice while transferring.
                    if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
                        etag = key.etag.strip('"')
                        md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
                if not md5:
                    # compute_md5() and also set self.size to actual
                    # size of the bytes read computing the md5.
                    md5 = self.compute_md5(fp, size)
                    # adjust size if required
                    size = self.size
                elif size:
                    self.size = size
                else:
                    # If md5 is provided, still need to size so
                    # calculate based on bytes to end of content
                    spos = fp.tell()
                    fp.seek(0, os.SEEK_END)
                    self.size = fp.tell() - spos
                    fp.seek(spos)
                    size = self.size
                # Cache the checksums so send_file can emit Content-MD5 and
                # verify the returned ETag.
                self.md5 = md5[0]
                self.base64md5 = md5[1]
            if self.name == None:
                # Default the key name to the content MD5 when none was set.
                self.name = self.md5
            if not replace:
                if self.bucket.lookup(self.name):
                    return
            self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
                           query_args=query_args,
                           chunked_transfer=chunked_transfer, size=size)
        # return number of bytes written.
        return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost. :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
"""
fp = open(filename, 'rb')
try:
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
finally:
fp.close()
def set_contents_from_string(self, s, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type md5: A tuple containing the hexdigest version of the MD5
checksum of the file as the first element and the
Base64-encoded version of the plain checksum as the second
element. This is the same format returned by the
compute_md5 method.
:param md5: If you need to compute the MD5 for any reason
prior to upload, it's silly to have to do it twice so this
param, if present, will be used as the MD5 values of the
file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if isinstance(s, unicode):
s = s.encode("utf-8")
fp = StringIO.StringIO(s)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: string
:param: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
query_args=None)
    def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
                           torrent=False, version_id=None, override_num_retries=None,
                           response_headers=None, query_args=None):
        """
        Shared implementation behind get_file()/get_torrent_file():
        stream this key's contents from S3 into the open file object
        *fp*, optionally reporting progress through *cb*.

        See get_file() for the public parameters. *query_args*, when
        given, is a list of extra query-string fragments to append to
        the GET request.
        """
        if headers is None:
            headers = {}
        # Suppress per-request debug output during the transfer;
        # restored at the end of this method.
        save_debug = self.bucket.connection.debug
        if self.bucket.connection.debug == 1:
            self.bucket.connection.debug = 0
        query_args = query_args or []
        if torrent:
            query_args.append('torrent')
            # No MD5 accumulation here: the torrent payload differs from
            # the object's own data.
            m = None
        else:
            m = md5()
        # If a version_id is passed in, use that. If not, check to see
        # if the Key object has an explicit version_id and, if so, use that.
        # Otherwise, don't pass a version_id query param.
        if version_id is None:
            version_id = self.version_id
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            for key in response_headers:
                query_args.append('%s=%s' % (key, urllib.quote(response_headers[key])))
        query_args = '&'.join(query_args)
        self.open('r', headers, query_args=query_args,
                  override_num_retries=override_num_retries)
        data_len = 0
        if cb:
            if self.size is None:
                cb_size = 0
            else:
                cb_size = self.size
            if self.size is None and num_cb != -1:
                # If size is not available due to chunked transfer for example,
                # we'll call the cb for every 1MB of data transferred.
                cb_count = (1024 * 1024) / self.BufferSize
            elif num_cb > 1:
                cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = 0
            cb(data_len, cb_size)
        # Iterating the key yields successive chunks of the response body.
        for bytes in self:
            fp.write(bytes)
            data_len += len(bytes)
            if m:
                m.update(bytes)
            if cb:
                if cb_size > 0 and data_len >= cb_size:
                    break
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(data_len, cb_size)
                    i = 0
        # Final progress callback for any tail not yet reported.
        if cb and (cb_count <= 1 or i > 0) and data_len > 0:
            cb(data_len, cb_size)
        if m:
            # Record (but do not verify) the MD5 of the downloaded bytes.
            self.md5 = m.hexdigest()
        if self.size is None and not torrent and "Range" not in headers:
            # Only a full, non-torrent download reveals the object's size.
            self.size = data_len
        self.close()
        self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
Get a torrent file (see to get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
:type fp: File -like object
:param fp:
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
:type res_upload_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.bucket != None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None):
    """
    Retrieve an object from S3 using the name of the Key object as the
    key in S3. Store contents of the object to a file named by 'filename'.
    See get_contents_to_file method for details about the parameters.

    :type filename: string
    :param filename: The filename of where to put the file contents.

    :type headers: dict
    :param headers: Any additional headers to send in the request.

    :type cb: function
    :param cb: a callback function that will be called to report
        progress on the download. The callback should accept two
        integer parameters, the first representing the number of
        bytes that have been successfully transferred and the second
        representing the total size of the object.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the
        cb parameter this parameter determines the granularity of
        the callback by defining the maximum number of times the
        callback will be called during the file transfer.

    :type torrent: bool
    :param torrent: If True, returns the contents of a torrent file
        as a string.

    :type res_download_handler: ResumableDownloadHandler
    :param res_download_handler: If provided, this handler will
        perform the download.

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP
        headers/values that will override any headers associated
        with the stored object in the response. See
        http://goo.gl/EWOPb for details.
    """
    fp = open(filename, 'wb')
    try:
        self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                                  version_id=version_id,
                                  res_download_handler=res_download_handler,
                                  response_headers=response_headers)
    except Exception:
        # Don't leave a partially-written file behind on failure.
        os.remove(filename)
        raise
    finally:
        fp.close()
    # If a last_modified date was sent from S3, try to set the local
    # file's timestamp to match it (best effort).
    if self.last_modified is not None:
        try:
            modified_tuple = rfc822.parsedate_tz(self.last_modified)
            modified_stamp = int(rfc822.mktime_tz(modified_tuple))
            os.utime(fp.name, (modified_stamp, modified_stamp))
        except Exception:
            # Unparsable/odd dates are ignored; the download succeeded.
            pass
def get_contents_as_string(self, headers=None,
                           cb=None, num_cb=10,
                           torrent=False,
                           version_id=None,
                           response_headers=None):
    """
    Retrieve an object from S3 using the name of the Key object as the
    key in S3 and return its contents as a string.
    See get_contents_to_file method for details about the parameters.

    :type headers: dict
    :param headers: Any additional headers to send in the request.

    :type cb: function
    :param cb: a callback function that will be called to report
        progress on the transfer (see get_contents_to_file).

    :type num_cb: int
    :param num_cb: (optional) granularity of the callback (maximum
        number of times it will be invoked during the transfer).

    :type torrent: bool
    :param torrent: If True, returns the contents of a torrent file
        as a string.

    :type response_headers: dict
    :param response_headers: A dictionary containing HTTP
        headers/values that will override any headers associated
        with the stored object in the response. See
        http://goo.gl/EWOPb for details.

    :rtype: string
    :returns: The contents of the file as a string
    """
    # Buffer the whole download in memory, then hand back the bytes.
    string_buffer = StringIO.StringIO()
    self.get_contents_to_file(string_buffer, headers, cb, num_cb,
                              torrent=torrent,
                              version_id=version_id,
                              response_headers=response_headers)
    return string_buffer.getvalue()
def add_email_grant(self, permission, email_address, headers=None):
    """
    Convenience method that provides a quick way to add an email grant
    to a key. This method retrieves the current ACL, creates a new
    grant based on the parameters passed in, adds that grant to the ACL
    and then PUT's the new ACL back to S3.

    :type permission: string
    :param permission: The permission being granted. Should be one of:
        (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

    :type email_address: string
    :param email_address: The email address associated with the AWS
        account you are granting the permission to.

    :type headers: dict
    :param headers: Any additional headers to send with the ACL
        GET/PUT requests.
    """
    # Read-modify-write cycle on the key's ACL policy.
    policy = self.get_acl(headers=headers)
    policy.acl.add_email_grant(permission, email_address)
    self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
                   display_name=None):
    """
    Convenience method that provides a quick way to add a canonical
    user grant to a key. This method retrieves the current ACL,
    creates a new grant based on the parameters passed in, adds that
    grant to the ACL and then PUT's the new ACL back to S3.

    :type permission: string
    :param permission: The permission being granted. Should be one of:
        (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

    :type user_id: string
    :param user_id: The canonical user id associated with the AWS
        account you are granting the permission to.

    :type headers: dict
    :param headers: Any additional headers to send with the ACL
        GET/PUT requests.

    :type display_name: string
    :param display_name: An optional string containing the user's
        Display Name. Only required on Walrus.
    """
    # Read-modify-write cycle on the key's ACL policy.
    policy = self.get_acl(headers=headers)
    policy.acl.add_user_grant(permission, user_id,
                              display_name=display_name)
    self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
    """
    Extracts metadata from existing URI into a dict, so we can
    overwrite/delete from it to form the new set of metadata to apply
    to a key.
    """
    result = {}
    for attr_name in self._underscore_base_user_settable_fields:
        if not hasattr(self, attr_name):
            continue
        attr_value = getattr(self, attr_name)
        if attr_value:
            # HTTP field names use '-' where attribute names use '_'.
            result[attr_name.replace('_', '-').lower()] = attr_value
    # self.metadata contains custom metadata, which are all
    # user-settable; these get the provider-specific prefix
    # (e.g. 'x-amz-meta-').
    meta_prefix = self.provider.metadata_prefix
    for attr_name in self.metadata:
        header_name = attr_name.replace('_', '-').lower()
        result['%s%s' % (meta_prefix, header_name)] = (
            self.metadata[attr_name])
    return result
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
                        headers=None):
    """
    Rewrite this key's remote metadata: start from the current remote
    metadata, apply additions from metadata_plus, drop every name in
    metadata_minus, and push the result via an in-place copy_key().
    """
    additions = self._normalize_metadata(metadata_plus)
    removals = self._normalize_metadata(metadata_minus)
    metadata = self._get_remote_metadata()
    metadata.update(additions)
    for name in removals:
        if name in metadata:
            del metadata[name]
    src_bucket = self.bucket
    # Boto prepends the meta prefix when adding headers, so strip the
    # prefix from metadata before sending it back into the copy_key()
    # call.
    rewritten = {}
    for name in metadata:
        if name.startswith('x-goog-meta-') or name.startswith('x-amz-meta-'):
            stripped = (name.replace('x-goog-meta-', '')
                        .replace('x-amz-meta-', ''))
        else:
            stripped = name
        rewritten[stripped] = metadata[name]
    src_bucket.copy_key(self.name, self.bucket.name, self.name,
                        metadata=rewritten, preserve_acl=preserve_acl,
                        headers=headers)
def restore(self, days, headers=None):
    """Restore an object from an archive.

    :type days: int
    :param days: The lifetime of the restored object (must
        be at least 1 day). If the object is already restored
        then this parameter can be used to readjust the lifetime
        of the restored object. In this case, the days
        param is with respect to the initial time of the request.
        If the object has not been restored, this param is with
        respect to the completion time of the request.
    """
    connection = self.bucket.connection
    response = connection.make_request(
        'POST', self.bucket.name, self.name,
        data=self.RestoreBody % days,
        headers=headers, query_args='restore')
    # 200 = already restored / adjusted; 202 = restore accepted.
    if response.status in (200, 202):
        return
    provider = connection.provider
    raise provider.storage_response_error(response.status,
                                          response.reason,
                                          response.read())
| bsd-3-clause |
alex/scales | src/greplin/scales/twistedweb.py | 3 | 2023 | # Copyright 2011 The scales Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines Twisted Web resources for status reporting."""
from greplin import scales
from greplin.scales import formats, util
from twisted.web import resource
class StatsResource(resource.Resource):
  """Twisted web resource for a status page."""

  isLeaf = True

  def __init__(self, serverName):
    resource.Resource.__init__(self)
    self.serverName = serverName

  def render_GET(self, request):
    """Renders a GET request, by showing this nodes stats and children."""
    segments = request.path.split('/')
    if not segments[-1]:
      # Drop a trailing empty segment produced by a trailing slash.
      segments = segments[:-1]
    parts = segments[2:]
    statDict = util.lookup(scales.getStats(), parts)

    if statDict is None:
      request.setResponseCode(404)
      return "Path not found."

    query = request.args['query'][0] if 'query' in request.args else None
    fmt = request.args['format'][0] if 'format' in request.args else None

    if fmt == 'json':
      request.headers['content-type'] = 'text/javascript; charset=UTF-8'
      formats.jsonFormat(request, statDict, query)
    elif fmt == 'prettyjson':
      request.headers['content-type'] = 'text/javascript; charset=UTF-8'
      formats.jsonFormat(request, statDict, query, pretty=True)
    else:
      formats.htmlHeader(request, '/' + '/'.join(parts), self.serverName, query)
      formats.htmlFormat(request, tuple(parts), statDict, query)

    return ''
| apache-2.0 |
phragment/xmms2switch | xmms2switch.py | 1 | 2080 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright © 2012 Thomas Krug
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import xmmsclient
import os
import sys
# Build the command-line interface: exactly one of -n/--next or
# -p/--prev selects the direction to cycle through playlists.
parser = OptionParser()
parser.add_option("-n", "--next",
                  action="store_true", dest="next", default=False,
                  help="switch to next playlist")
parser.add_option("-p", "--prev",
                  action="store_true", dest="prev", default=False,
                  help="switch to previous playlist")
(options, args) = parser.parse_args()

if options.next and options.prev:
    # parser.error() prints usage and exits; the sys.exit is a safeguard.
    parser.error("options -n and -p are mutually exclusive")
    sys.exit(1)

# Connect synchronously to the xmms2 daemon at the path in $XMMS_PATH.
xmms = xmmsclient.XMMSSync("xmms2switch")
try:
    xmms.connect(os.getenv("XMMS_PATH"))
except IOError, detail:
    print "Error:", detail
    sys.exit(1)

playlist_cur = xmms.playlist_current_active()

position_cur = 0
position = 0
playlists = []
# Collect the visible playlists (names beginning with "_" are internal)
# and record the index of the currently active one.
for playlist in xmms.playlist_list():
    if not playlist.startswith("_"):
        playlists.append(playlist)
        if playlist == playlist_cur:
            position_cur = position
        position += 1

# 'position' now equals the number of visible playlists; wrap around
# the ends when stepping forward or backward.
if options.next:
    position_new = position_cur + 1
    if position_new >= position:
        position_new -= position
    xmms.playlist_load(playlists[position_new])

if options.prev:
    position_new = position_cur - 1
    if position_new < 0:
        position_new += position
    xmms.playlist_load(playlists[position_new])

xmms.disconnect()
| gpl-3.0 |
michelts/lettuce | tests/integration/lib/Django-1.3/tests/modeltests/or_lookups/tests.py | 92 | 7584 | from datetime import datetime
from operator import attrgetter
from django.db.models import Q
from django.test import TestCase
from models import Article
class OrLookupsTests(TestCase):
    """Tests for OR-ed lookups: combining querysets and Q objects with
    ``|``, ``&`` and ``~``, plus ``pk__in`` and related query APIs."""

    def setUp(self):
        # Three articles; each test refers to them by stored primary key.
        self.a1 = Article.objects.create(
            headline='Hello', pub_date=datetime(2005, 11, 27)
        ).pk
        self.a2 = Article.objects.create(
            headline='Goodbye', pub_date=datetime(2005, 11, 28)
        ).pk
        self.a3 = Article.objects.create(
            headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
        ).pk

    def test_filter_or(self):
        # ORing two querysets, or two Q objects inside one filter(),
        # yields the union of their results.
        self.assertQuerysetEqual(
            Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

        self.assertQuerysetEqual(
            Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

        self.assertQuerysetEqual(
            Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

    def test_stages(self):
        # You can shorten this syntax with code like the following, which is
        # especially useful if building the query in stages:
        articles = Article.objects.all()
        self.assertQuerysetEqual(
            articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
            []
        )
        self.assertQuerysetEqual(
            articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

    def test_pk_q(self):
        # Q objects matching on primary key combine with OR like any other.
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
                'Hello',
                'Goodbye'
            ],
            attrgetter("headline")
        )

        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_pk_in(self):
        # pk__in accepts lists and tuples; unknown ids are ignored.
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
                'Hello',
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_q_negated(self):
        # Q objects can be negated
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
                'Hello',
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )

        self.assertQuerysetEqual(
            Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        # This allows for more complex queries than filter() and exclude()
        # alone would allow
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
                'Hello'
            ],
            attrgetter("headline"),
        )

    def test_complex_filter(self):
        # The 'complex_filter' method supports framework features such as
        # 'limit_choices_to' which normally take a single dictionary of lookup
        # arguments but need to support arbitrary queries via Q objects too.
        self.assertQuerysetEqual(
            Article.objects.complex_filter({'pk': self.a1}), [
                'Hello'
            ],
            attrgetter("headline"),
        )

        self.assertQuerysetEqual(
            Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
                'Hello',
                'Goodbye'
            ],
            attrgetter("headline"),
        )

    def test_empty_in(self):
        # Passing "in" an empty list returns no results ...
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[]),
            []
        )
        # ... but can return results if we OR it with another query.
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

    def test_q_and(self):
        # Q arg objects are ANDed
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        # Q arg AND order is irrelevant
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )

        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
            []
        )

    def test_q_exclude(self):
        # exclude() with a Q object negates the whole condition.
        self.assertQuerysetEqual(
            Article.objects.exclude(Q(headline__startswith='Hello')), [
                'Goodbye'
            ],
            attrgetter("headline")
        )

    def test_other_arg_queries(self):
        # Try some arg queries with operations other than filter.
        self.assertEqual(
            Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
            'Hello and goodbye'
        )

        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
            3
        )

        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
                {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
            ],
            lambda o: o,
        )

        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
            {self.a1: Article.objects.get(pk=self.a1)}
        )
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ration = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
# 25% of the Ideal Distribution Ratio computed above.
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0

# Char to FreqOrder table; number of characters tracked in the
# frequency-order mapping below.
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit |
ging/horizon | openstack_dashboard/dashboards/project/data_processing/nodegroup_templates/workflows/create.py | 2 | 11715 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import network
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project.data_processing.utils \
import helpers
from openstack_dashboard.dashboards.project.data_processing.utils \
import workflow_helpers
from openstack_dashboard.dashboards.project.instances \
import utils as nova_utils
LOG = logging.getLogger(__name__)
class GeneralConfigAction(workflows.Action):
    """Form action collecting the general parameters of a Sahara node
    group template: name, flavor, storage backend, node processes,
    floating-IP pool and security groups.

    Most fields are declared statically; plugin-dependent fields
    (processes, plugin-specific configs) are added in ``__init__``.
    """
    nodegroup_name = forms.CharField(label=_("Template Name"))
    description = forms.CharField(label=_("Description"),
                                  required=False,
                                  widget=forms.Textarea(attrs={'rows': 4}))
    flavor = forms.ChoiceField(label=_("OpenStack Flavor"))
    storage = forms.ChoiceField(
        label=_("Storage location"),
        help_text=_("Choose a storage location"),
        choices=[("ephemeral_drive", "Ephemeral Drive"),
                 ("cinder_volume", "Cinder Volume")],
        widget=forms.Select(attrs={"class": "storage_field"}))
    # Volume fields only apply when storage == "cinder_volume"; the
    # CSS classes are hooks for client-side show/hide logic.
    volumes_per_node = forms.IntegerField(
        label=_("Volumes per node"),
        required=False,
        initial=1,
        widget=forms.TextInput(attrs={"class": "volume_per_node_field"})
    )
    volumes_size = forms.IntegerField(
        label=_("Volumes size (GB)"),
        required=False,
        initial=10,
        widget=forms.TextInput(attrs={"class": "volume_size_field"})
    )
    # Hidden marker recognized by the client-side JS for this dialog.
    hidden_configure_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
    def __init__(self, request, *args, **kwargs):
        """Build the dynamic, plugin-dependent part of the form."""
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
        hlps = helpers.Helpers(request)
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        # Choices are "<service>:<process>" pairs so the service can be
        # recovered later from the selected value.
        process_choices = []
        try:
            version_details = saharaclient.plugin_get_version_details(
                request, plugin, hadoop_version)
            for service, processes in version_details.node_processes.items():
                for process in processes:
                    process_choices.append(
                        (str(service) + ":" + str(process), process))
        except Exception:
            # best-effort: show an error but keep the form usable
            exceptions.handle(request,
                              _("Unable to generate process choices."))
        # Only offer a floating IP pool choice when Sahara does not
        # allocate IPs automatically.
        if not saharaclient.SAHARA_AUTO_IP_ALLOCATION_ENABLED:
            pools = network.floating_ip_pools_list(request)
            pool_choices = [(pool.id, pool.name) for pool in pools]
            pool_choices.insert(0, (None, "Do not assign floating IPs"))
            self.fields['floating_ip_pool'] = forms.ChoiceField(
                label=_("Floating IP pool"),
                choices=pool_choices,
                required=False)
        self.fields["autogroup"] = forms.BooleanField(
            label=_("Auto Security Group"),
            widget=forms.CheckboxInput(),
            help_text=_("Create security group for this Node Group."),
            required=False)
        groups = network.security_group_list(request)
        security_group_list = [(sg.id, sg.name) for sg in groups]
        self.fields["groups"] = forms.MultipleChoiceField(
            label=_("Security Groups"),
            widget=forms.CheckboxSelectMultiple(),
            help_text=_("Launch instances in these security groups."),
            choices=security_group_list,
            required=False)
        self.fields["processes"] = forms.MultipleChoiceField(
            label=_("Processes"),
            widget=forms.CheckboxSelectMultiple(),
            help_text=_("Processes to be launched in node group"),
            choices=process_choices)
        # Carry the plugin selection through as hidden fields so the
        # later workflow steps can read it back from POST data.
        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=plugin
        )
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=hadoop_version
        )
        # One dynamically-built control per plugin-defined config param.
        node_parameters = hlps.get_general_node_group_configs(plugin,
                                                              hadoop_version)
        for param in node_parameters:
            self.fields[param.name] = workflow_helpers.build_control(param)
    def populate_flavor_choices(self, request, context):
        """Return the sorted Nova flavor choices for the flavor field."""
        flavors = nova_utils.flavor_list(request)
        if flavors:
            return nova_utils.sort_flavor_list(request, flavors)
        return []
    def get_help_text(self):
        """Render the help panel with plugin/version context injected."""
        extra = dict()
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(self.request))
        extra["plugin_name"] = plugin
        extra["hadoop_version"] = hadoop_version
        return super(GeneralConfigAction, self).get_help_text(extra)
    class Meta:
        name = _("Configure Node Group Template")
        help_text_template = (
            "project/data_processing.nodegroup_templates"
            "/_configure_general_help.html")
class GeneralConfig(workflows.Step):
    """Workflow step wrapping :class:`GeneralConfigAction`.

    Exports every non-hidden form value into the workflow context
    under a ``general_`` prefix, normalizing the literal string
    ``"None"`` to ``None``, and collects the selected process list
    straight from the raw POST data.
    """
    action_class = GeneralConfigAction
    contributes = ("general_nodegroup_name", )
    def contribute(self, data, context):
        for field_name, field_value in data.items():
            if "hidden" in field_name:
                # Internal marker fields are never exported.
                continue
            if field_value == "None":
                field_value = None
            context["general_" + field_name] = field_value
        request_post = self.workflow.request.POST
        context['general_processes'] = request_post.getlist("processes")
        return context
class ConfigureNodegroupTemplate(workflow_helpers.ServiceParametersWorkflow,
                                 workflow_helpers.StatusFormatMixin):
    """Second-stage workflow: collect all template fields and create
    the node group template through the Sahara client.
    """
    slug = "configure_nodegroup_template"
    name = _("Create Node Group Template")
    finalize_button_name = _("Create")
    success_message = _("Created Node Group Template %s")
    name_property = "general_nodegroup_name"
    success_url = "horizon:project:data_processing.nodegroup_templates:index"
    default_steps = (GeneralConfig,)
    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        """Populate per-service config tabs before the base __init__
        builds the steps, so the tabs exist when steps are rendered."""
        hlps = helpers.Helpers(request)
        plugin, hadoop_version = (
            workflow_helpers.get_plugin_and_hadoop_version(request))
        general_parameters = hlps.get_general_node_group_configs(
            plugin,
            hadoop_version)
        service_parameters = hlps.get_targeted_node_group_configs(
            plugin,
            hadoop_version)
        self._populate_tabs(general_parameters, service_parameters)
        super(ConfigureNodegroupTemplate, self).__init__(request,
                                                         context_seed,
                                                         entry_point,
                                                         *args, **kwargs)
    def is_valid(self):
        """Validate only the steps whose service was actually enabled
        by the user's process selection (plus the general step)."""
        missing = self.depends_on - set(self.context.keys())
        if missing:
            raise exceptions.WorkflowValidationError(
                "Unable to complete the workflow. The values %s are "
                "required but not present." % ", ".join(missing))
        checked_steps = []
        if "general_processes" in self.context:
            checked_steps = self.context["general_processes"]
        # Selected processes are "<service>:<process>"; keep services.
        enabled_services = set([])
        for process_name in checked_steps:
            enabled_services.add(str(process_name).split(":")[0])
        steps_valid = True
        for step in self.steps:
            process_name = str(getattr(step, "process_name", None))
            # Skip service steps whose service is not enabled; the
            # general step is always validated.
            if process_name not in enabled_services and \
                    not isinstance(step, GeneralConfig):
                continue
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)
    def handle(self, request, context):
        """Create the template; return True on success, False (with an
        error description) on an API error.  Unexpected exceptions are
        surfaced to the user via exceptions.handle (returns None)."""
        try:
            # strip the "<service>:" prefix off each selected process
            processes = []
            for service_process in context["general_processes"]:
                processes.append(str(service_process).split(":")[1])
            configs_dict = (
                workflow_helpers.parse_configs_from_context(
                    context, self.defaults))
            plugin, hadoop_version = (
                workflow_helpers.get_plugin_and_hadoop_version(request))
            # volume settings only apply for cinder-backed storage
            volumes_per_node = None
            volumes_size = None
            if context["general_storage"] == "cinder_volume":
                volumes_per_node = context["general_volumes_per_node"]
                volumes_size = context["general_volumes_size"]
            saharaclient.nodegroup_template_create(
                request,
                name=context["general_nodegroup_name"],
                plugin_name=plugin,
                hadoop_version=hadoop_version,
                description=context["general_description"],
                flavor_id=context["general_flavor"],
                volumes_per_node=volumes_per_node,
                volumes_size=volumes_size,
                node_processes=processes,
                node_configs=configs_dict,
                floating_ip_pool=context.get("general_floating_ip_pool"),
                security_groups=context["general_groups"],
                auto_security_group=context["general_autogroup"])
            return True
        except api_base.APIException as e:
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request)
class SelectPluginAction(workflows.Action,
                         workflow_helpers.PluginAndVersionMixin):
    """Form action for the first stage: choose a plugin and version."""
    # Hidden marker recognized by the client-side JS as the "create"
    # dialog variant of this form.
    hidden_create_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
    def __init__(self, request, *args, **kwargs):
        super(SelectPluginAction, self).__init__(request, *args, **kwargs)
        # Plugin and per-plugin version choice fields are generated
        # from the live Sahara service (mixin helper).
        sahara = saharaclient.client(request)
        self._generate_plugin_version_fields(sahara)
    class Meta:
        name = _("Select plugin and hadoop version")
        help_text_template = ("project/data_processing.nodegroup_templates"
                              "/_create_general_help.html")
class SelectPlugin(workflows.Step):
    """Workflow step recording the chosen plugin and its version.

    The version select widget in the form is named
    ``<plugin_name>_version``, so the plugin name must be resolved
    first in order to look the version up.
    """
    action_class = SelectPluginAction
    contributes = ("plugin_name", "hadoop_version")
    def contribute(self, data, context):
        context = super(SelectPlugin, self).contribute(data, context)
        plugin_name = data.get('plugin_name', None)
        context["plugin_name"] = plugin_name
        if plugin_name is not None:
            context["hadoop_version"] = data.get(
                plugin_name + "_version", None)
        else:
            # Bug fix: the original unconditionally computed
            # ``context["plugin_name"] + "_version"`` which raised
            # TypeError when no plugin was selected.
            context["hadoop_version"] = None
        return context
class CreateNodegroupTemplate(workflows.Workflow):
    """First-stage workflow: plugin/version selection only.

    The actual template fields are collected by
    :class:`ConfigureNodegroupTemplate` in the next stage.
    """
    slug = "create_nodegroup_template"
    name = _("Create Node Group Template")
    finalize_button_name = _("Create")
    success_message = _("Created")
    failure_message = _("Could not create")
    success_url = "horizon:project:data_processing.nodegroup_templates:index"
    default_steps = (SelectPlugin,)
| apache-2.0 |
bbbenja/SickRage | lib/sqlalchemy/sql/base.py | 76 | 21130 | # sql/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Foundational utilities common to many sql modules.
"""
from .. import util, exc
import itertools
from .visitors import ClauseVisitor
import re
import collections
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
NO_ARG = util.symbol('NO_ARG')
class Immutable(object):
    """mark a ClauseElement as 'immutable' when expressions are cloned."""
    def _copy_not_supported(self):
        # shared raiser for both parameter-copying entry points
        raise NotImplementedError("Immutable objects do not support copying")
    def unique_params(self, *optionaldict, **kwargs):
        self._copy_not_supported()
    def params(self, *optionaldict, **kwargs):
        self._copy_not_supported()
    def _clone(self):
        # cloning an immutable element yields the element itself
        return self
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative.

    The decorated method runs against a shallow copy of ``self``
    (produced by ``_generate()``); the copy is returned, so chained
    calls never mutate the original object.
    """
    self = args[0]._generate()
    fn(self, *args[1:], **kw)
    return self
class _DialectArgView(collections.MutableMapping):
    """A dictionary view of dialect-level arguments in the form
    <dialectname>_<argument_name>.
    """
    def __init__(self, obj):
        # obj is the DialectKWArgs-bearing construct whose
        # dialect_options registry backs this view
        self.obj = obj
    def _key(self, key):
        # split "<dialect>_<argname>" on the first underscore only;
        # argument names themselves may contain underscores
        try:
            dialect, value_key = key.split("_", 1)
        except ValueError:
            # no underscore present - not a valid dialect-prefixed key
            raise KeyError(key)
        else:
            return dialect, value_key
    def __getitem__(self, key):
        dialect, value_key = self._key(key)
        try:
            opt = self.obj.dialect_options[dialect]
        except exc.NoSuchModuleError:
            # unknown dialect names surface as plain missing keys
            raise KeyError(key)
        else:
            return opt[value_key]
    def __setitem__(self, key, value):
        try:
            dialect, value_key = self._key(key)
        except KeyError:
            raise exc.ArgumentError(
                "Keys must be of the form <dialectname>_<argname>")
        else:
            self.obj.dialect_options[dialect][value_key] = value
    def __delitem__(self, key):
        dialect, value_key = self._key(key)
        del self.obj.dialect_options[dialect][value_key]
    def __len__(self):
        # only user-specified (non-default) arguments are counted
        return sum(len(args._non_defaults) for args in
                   self.obj.dialect_options.values())
    def __iter__(self):
        return (
            "%s_%s" % (dialect_name, value_name)
            for dialect_name in self.obj.dialect_options
            for value_name in self.obj.dialect_options[dialect_name]._non_defaults
        )
class _DialectArgDict(collections.MutableMapping):
    """A dictionary view of dialect-level arguments for a specific
    dialect.

    User-specified arguments are held apart from the dialect-supplied
    defaults; lookups consult the user-specified values first.
    """
    def __init__(self):
        self._non_defaults = {}
        self._defaults = {}
    def _all_keys(self):
        # union of user-specified and default argument names
        return set(self._non_defaults).union(self._defaults)
    def __len__(self):
        return len(self._all_keys())
    def __iter__(self):
        return iter(self._all_keys())
    def __getitem__(self, key):
        try:
            return self._non_defaults[key]
        except KeyError:
            return self._defaults[key]
    def __setitem__(self, key, value):
        self._non_defaults[key] = value
    def __delitem__(self, key):
        del self._non_defaults[key]
class DialectKWArgs(object):
    """Establish the ability for a class to have dialect-specific arguments
    with defaults and constructor validation.

    The :class:`.DialectKWArgs` interacts with the
    :attr:`.DefaultDialect.construct_arguments` present on a dialect.

    .. seealso::

        :attr:`.DefaultDialect.construct_arguments`

    """
    @classmethod
    def argument_for(cls, dialect_name, argument_name, default):
        """Add a new kind of dialect-specific keyword argument for this class.

        E.g.::

            Index.argument_for("mydialect", "length", None)

            some_index = Index('a', 'b', mydialect_length=5)

        The :meth:`.DialectKWArgs.argument_for` method is a per-argument
        way adding extra arguments to the :attr:`.DefaultDialect.construct_arguments`
        dictionary. This dictionary provides a list of argument names accepted by
        various schema-level constructs on behalf of a dialect.

        New dialects should typically specify this dictionary all at once as a data
        member of the dialect class. The use case for ad-hoc addition of
        argument names is typically for end-user code that is also using
        a custom compilation scheme which consumes the additional arguments.

        :param dialect_name: name of a dialect. The dialect must be locatable,
         else a :class:`.NoSuchModuleError` is raised. The dialect must
         also include an existing :attr:`.DefaultDialect.construct_arguments` collection,
         indicating that it participates in the keyword-argument validation and
         default system, else :class:`.ArgumentError` is raised.
         If the dialect does not include this collection, then any keyword argument
         can be specified on behalf of this dialect already. All dialects
         packaged within SQLAlchemy include this collection, however for third
         party dialects, support may vary.

        :param argument_name: name of the parameter.

        :param default: default value of the parameter.

        .. versionadded:: 0.9.4

        """
        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        if construct_arg_dictionary is None:
            # Bug fix: the original message read "does have
            # keyword-argument validation and defaults enabled
            # configured", i.e. it was both garbled and inverted.
            raise exc.ArgumentError(
                "Dialect '%s' does not have keyword-argument "
                "validation and defaults enabled" % dialect_name)
        construct_arg_dictionary[cls][argument_name] = default
    @util.memoized_property
    def dialect_kwargs(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.

        The arguments are present here in their original ``<dialect>_<kwarg>``
        format.  Only arguments that were actually passed are included;
        unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
        contains all options known by this dialect including defaults.

        The collection is also writable; keys are accepted of the
        form ``<dialect>_<kwarg>`` where the value will be assembled
        into the list of options.

        .. versionadded:: 0.9.2

        .. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
           collection is now writable.

        .. seealso::

            :attr:`.DialectKWArgs.dialect_options` - nested dictionary form

        """
        return _DialectArgView(self)
    @property
    def kwargs(self):
        """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
        return self.dialect_kwargs
    @util.dependencies("sqlalchemy.dialects")
    def _kw_reg_for_dialect(dialects, dialect_name):
        # returns None when the dialect opts out of kwarg validation
        dialect_cls = dialects.registry.load(dialect_name)
        if dialect_cls.construct_arguments is None:
            return None
        return dict(dialect_cls.construct_arguments)
    # lazily-populated map: dialect name -> construct-arguments dict
    _kw_registry = util.PopulateDict(_kw_reg_for_dialect)
    def _kw_reg_for_dialect_cls(self, dialect_name):
        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        d = _DialectArgDict()
        if construct_arg_dictionary is None:
            # dialect does not validate: accept any argument name
            d._defaults.update({"*": None})
        else:
            # collect defaults from the whole MRO, most-derived last
            for cls in reversed(self.__class__.__mro__):
                if cls in construct_arg_dictionary:
                    d._defaults.update(construct_arg_dictionary[cls])
        return d
    @util.memoized_property
    def dialect_options(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.

        This is a two-level nested registry, keyed to ``<dialect_name>``
        and ``<argument_name>``.  For example, the ``postgresql_where`` argument
        would be locatable as::

            arg = my_object.dialect_options['postgresql']['where']

        .. versionadded:: 0.9.2

        .. seealso::

            :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form

        """
        return util.PopulateDict(
            util.portable_instancemethod(self._kw_reg_for_dialect_cls)
        )
    def _validate_dialect_kwargs(self, kwargs):
        # validate remaining kwargs that they all specify DB prefixes
        if not kwargs:
            return
        for k in kwargs:
            m = re.match('^(.+?)_(.+)$', k)
            if not m:
                raise TypeError("Additional arguments should be "
                                "named <dialectname>_<argument>, got '%s'" % k)
            dialect_name, arg_name = m.group(1, 2)
            try:
                construct_arg_dictionary = self.dialect_options[dialect_name]
            except exc.NoSuchModuleError:
                # unknown dialect: warn but accept the argument as-is
                util.warn(
                    "Can't validate argument %r; can't "
                    "locate any SQLAlchemy dialect named %r" %
                    (k, dialect_name))
                self.dialect_options[dialect_name] = d = _DialectArgDict()
                d._defaults.update({"*": None})
                d._non_defaults[arg_name] = kwargs[k]
            else:
                # "*" in the defaults means the dialect accepts anything
                if "*" not in construct_arg_dictionary and \
                        arg_name not in construct_arg_dictionary:
                    raise exc.ArgumentError(
                        "Argument %r is not accepted by "
                        "dialect %r on behalf of %r" % (
                            k,
                            dialect_name, self.__class__
                        ))
                else:
                    construct_arg_dictionary[arg_name] = kwargs[k]
class Generative(object):
    """Allow a ClauseElement to generate itself via the
    @_generative decorator.
    """
    def _generate(self):
        # Shallow copy without invoking __init__, so the copy starts
        # from this object's current state.
        cls = self.__class__
        duplicate = cls.__new__(cls)
        duplicate.__dict__ = dict(self.__dict__)
        return duplicate
class Executable(Generative):
    """Mark a ClauseElement as supporting execution.

    :class:`.Executable` is a superclass for all "statement" types
    of objects, including :func:`select`, :func:`delete`, :func:`update`,
    :func:`insert`, :func:`text`.

    """
    supports_execution = True
    _execution_options = util.immutabledict()
    _bind = None
    @_generative
    def execution_options(self, **kw):
        """ Set non-SQL options for the statement which take effect during
        execution.

        Execution options can be set on a per-statement or
        per :class:`.Connection` basis.  Additionally, the
        :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
        access to execution options which they in turn configure upon
        connections.

        The :meth:`execution_options` method is generative.  A new
        instance of this statement is returned that contains the options::

            statement = select([table.c.x, table.c.y])
            statement = statement.execution_options(autocommit=True)

        Note that only a subset of possible execution options can be applied
        to a statement - these include "autocommit" and "stream_results",
        but not "isolation_level" or "compiled_cache".
        See :meth:`.Connection.execution_options` for a full list of
        possible options.

        .. seealso::

            :meth:`.Connection.execution_options()`

            :meth:`.Query.execution_options()`

        """
        if 'isolation_level' in kw:
            raise exc.ArgumentError(
                "'isolation_level' execution option may only be specified "
                "on Connection.execution_options(), or "
                "per-engine using the isolation_level "
                "argument to create_engine()."
            )
        if 'compiled_cache' in kw:
            raise exc.ArgumentError(
                "'compiled_cache' execution option may only be specified "
                "on Connection.execution_options(), not per statement."
            )
        self._execution_options = self._execution_options.union(kw)
    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`."""
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            # Bug fix: the implicit string concatenation below was
            # missing a space, producing "...Engine.Use the..."
            msg = ('This %s is not directly bound to a Connection or Engine. '
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`, returning the
        result's scalar representation.

        """
        return self.execute(*multiparams, **params).scalar()
    @property
    def bind(self):
        """Returns the :class:`.Engine` or :class:`.Connection` to
        which this :class:`.Executable` is bound, or None if none found.

        This is a traversal which checks locally, then
        checks among the "from" clauses of associated objects
        until a bound engine or connection is found.

        """
        if self._bind is not None:
            return self._bind
        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None
class SchemaEventTarget(object):
    """Base class for elements that are the targets of :class:`.DDLEvents`
    events.

    This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
    """
    def _set_parent(self, parent):
        """Associate with this SchemaEvent's parent object."""
        raise NotImplementedError()
    def _set_parent_with_dispatch(self, parent):
        # run _set_parent() bracketed by before/after attach events;
        # the ordering of these three calls is part of the contract
        self.dispatch.before_parent_attach(self, parent)
        self._set_parent(parent)
        self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
    """Define the visiting for ``SchemaItem`` objects."""
    # restrict the ClauseVisitor traversal to schema-level objects
    __traverse_options__ = {'schema_visitor': True}
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary that stores a list of ColumnElement
    instances.

    Overrides the ``__eq__()`` method to produce SQL clauses between
    sets of correlated columns.

    Internally keeps three structures in sync: ``_data`` (key -> column
    mapping from OrderedProperties), ``_all_col_set`` (identity set of
    every column ever added, including key collisions) and
    ``_all_columns`` (ordered list of every column).
    """
    def __init__(self):
        super(ColumnCollection, self).__init__()
        # bypass __setattr__, which is disabled on this class
        self.__dict__['_all_col_set'] = util.column_set()
        self.__dict__['_all_columns'] = []
    def __str__(self):
        return repr([str(c) for c in self])
    def replace(self, column):
        """add the given column to this collection, removing unaliased
        versions of this column  as well as existing columns with the
        same key.

            e.g.::

                t = Table('sometable', metadata, Column('col1', Integer))
                t.columns.replace(Column('col1', Integer, key='columnone'))

            will remove the original 'col1' from the collection, and add
            the new column under the name 'columnone'.

           Used by schema.Column to override columns during table reflection.

        """
        remove_col = None
        # an unaliased column (name == key) with the same name is
        # displaced by the incoming column
        if column.name in self and column.key != column.name:
            other = self[column.name]
            if other.name == other.key:
                remove_col = other
                self._all_col_set.remove(other)
                del self._data[other.key]
        if column.key in self._data:
            remove_col = self._data[column.key]
            self._all_col_set.remove(remove_col)
        self._all_col_set.add(column)
        self._data[column.key] = column
        if remove_col is not None:
            # substitute in place to preserve ordering
            self._all_columns[:] = [column if c is remove_col
                                    else c for c in self._all_columns]
        else:
            self._all_columns.append(column)
    def add(self, column):
        """Add a column to this collection.

        The key attribute of the column will be used as the hash key
        for this dictionary.

        """
        self[column.key] = column
    def __delitem__(self, key):
        raise NotImplementedError()
    def __setattr__(self, key, object):
        raise NotImplementedError()
    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements
            # which have conflicting column names in their exported
            # columns collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn('Column %r on table %r being replaced by '
                          '%r, which has the same key.  Consider '
                          'use_labels for select() statements.' % (key,
                          getattr(existing, 'table', None), value))
        # pop out memoized proxy_set as this
        # operation may very well be occurring
        # in a _make_proxy operation
        util.memoized_property.reset(value, "proxy_set")
        self._all_col_set.add(value)
        self._all_columns.append(value)
        self._data[key] = value
    def clear(self):
        raise NotImplementedError()
    def remove(self, column):
        # remove from all three internal structures
        del self._data[column.key]
        self._all_col_set.remove(column)
        self._all_columns[:] = [c for c in self._all_columns if c is not column]
    def update(self, iter):
        # iter yields (label, column) pairs; already-present columns
        # are not re-appended to the ordered list
        cols = list(iter)
        self._all_columns.extend(c for label, c in cols if c not in self._all_col_set)
        self._all_col_set.update(c for label, c in cols)
        self._data.update((label, c) for label, c in cols)
    def extend(self, iter):
        # iter yields columns; keys are taken from each column
        cols = list(iter)
        self._all_columns.extend(c for c in cols if c not in self._all_col_set)
        self._all_col_set.update(cols)
        self._data.update((c.key, c) for c in cols)
    # unhashable: __eq__ returns SQL clauses, not booleans
    __hash__ = None
    @util.dependencies("sqlalchemy.sql.elements")
    def __eq__(self, elements, other):
        # produce an AND of equality clauses between correlated columns
        l = []
        for c in getattr(other, "_all_columns", other):
            for local in self._all_columns:
                if c.shares_lineage(local):
                    l.append(c == local)
        return elements.and_(*l)
    def __contains__(self, other):
        if not isinstance(other, util.string_types):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)
    def __getstate__(self):
        return {'_data': self.__dict__['_data'],
                '_all_columns': self.__dict__['_all_columns']}
    def __setstate__(self, state):
        # _all_col_set is rebuilt rather than pickled
        self.__dict__['_data'] = state['_data']
        self.__dict__['_all_columns'] = state['_all_columns']
        self.__dict__['_all_col_set'] = util.column_set(state['_all_columns'])
    def contains_column(self, col):
        # this has to be done via set() membership
        return col in self._all_col_set
    def as_immutable(self):
        return ImmutableColumnCollection(self._data, self._all_col_set, self._all_columns)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
    """Read-only variant of :class:`.ColumnCollection`, sharing the
    internal structures of an existing collection."""
    def __init__(self, data, colset, all_columns):
        util.ImmutableProperties.__init__(self, data)
        # bypass __setattr__, which is disabled on this class
        self.__dict__['_all_col_set'] = colset
        self.__dict__['_all_columns'] = all_columns
    # mutation entry points raise via ImmutableProperties._immutable
    extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
    """An ordered set of columns supporting SQL ``==`` comparison."""
    def contains_column(self, col):
        return col in self
    def extend(self, cols):
        for col in cols:
            self.add(col)
    def __add__(self, other):
        return list(self) + list(other)
    @util.dependencies("sqlalchemy.sql.elements")
    def __eq__(self, elements, other):
        # produce an AND of equality clauses between correlated columns
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c == local)
        return elements.and_(*l)
    def __hash__(self):
        return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(schemaitem, 'fullname',
getattr(schemaitem, 'name', None))
if label:
item = '%s object %r' % (name, label)
else:
item = '%s object' % name
if msg is None:
msg = "%s is not bound to an Engine or Connection. "\
"Execution can not proceed without a database to execute "\
"against." % item
raise exc.UnboundExecutionError(msg)
return bind
| gpl-3.0 |
fishcorn/pylearn2 | pylearn2/devtools/list_files.py | 45 | 1772 | """Code for listing files that belong to the library."""
import logging
import pylearn2
import os
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
logger = logging.getLogger(__name__)
def list_files(suffix=""):
    """
    Returns a list of all files in pylearn2 with the given suffix.

    Parameters
    ----------
    suffix : str
        Filename ending to match; the empty string matches every file.

    Returns
    -------
    file_list : list
        A list of all files in pylearn2 whose filepath ends with `suffix`
    """
    # pylearn2 is expected to live at exactly one filesystem location;
    # single-element unpacking enforces that assumption.
    (package_root,) = pylearn2.__path__
    return _list_files(package_root, suffix)
def _list_files(path, suffix=""):
"""
.. todo::
WRITEME
Parameters
----------
path : str
a filepath
suffix : str
Returns
-------
l : list
A list of all files ending in `suffix` contained within `path`.
(If `path` is a file rather than a directory, it is considered
to "contain" itself)
"""
if os.path.isdir(path):
incomplete = os.listdir(path)
complete = [os.path.join(path, entry) for entry in incomplete]
lists = [_list_files(subpath, suffix) for subpath in complete]
flattened = []
for l in lists:
for elem in l:
flattened.append(elem)
return flattened
else:
assert os.path.exists(path), "couldn't find file '%s'" % path
if path.endswith(suffix):
return [path]
return []
if __name__ == '__main__':
    # Script mode: print (via the module logger) every .py file
    # contained in the installed pylearn2 package.
    result = list_files('.py')
    for path in result:
        logger.info(path)
| bsd-3-clause |
coffenbacher/askbot-devel | askbot/models/__init__.py | 4 | 143888 | from askbot import startup_procedures
startup_procedures.run()
from django.contrib.auth.models import User
#set up a possibility for the users to follow others
try:
import followit
followit.register(User)
except ImportError:
pass
import collections
import datetime
import hashlib
import logging
import re
import urllib
import uuid
from celery import states
from celery.task import task
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.paginator import Paginator
from django.db.models import signals as django_signals
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import get_language
from django.utils.translation import string_concat
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django.db import models
from django.conf import settings as django_settings
from django.contrib.contenttypes.models import ContentType
from django.core import cache
from django.core import exceptions as django_exceptions
from django_countries.fields import CountryField
from askbot import exceptions as askbot_exceptions
from askbot import const
from askbot.const import message_keys
from askbot.conf import settings as askbot_settings
from askbot.models.question import Thread
from askbot.skins import utils as skin_utils
from askbot.mail import messages
from askbot.models.question import QuestionView, AnonymousQuestion
from askbot.models.question import DraftQuestion
from askbot.models.question import FavoriteQuestion
from askbot.models.tag import Tag, MarkedTag, TagSynonym
from askbot.models.tag import format_personal_group_name
from askbot.models.user import EmailFeedSetting, ActivityAuditStatus, Activity
from askbot.models.user import GroupMembership
from askbot.models.user import Group
from askbot.models.user import BulkTagSubscription
from askbot.models.post import Post, PostRevision
from askbot.models.post import PostFlagReason, AnonymousAnswer
from askbot.models.post import PostToGroup
from askbot.models.post import DraftAnswer
from askbot.models.reply_by_email import ReplyAddress
from askbot.models.badges import award_badges_signal, get_badge
from askbot.models.repute import Award, Repute, Vote, BadgeData
from askbot.models.widgets import AskWidget, QuestionWidget
from askbot.models.meta import ImportRun, ImportedObjectInfo
from askbot import auth
from askbot.utils.decorators import auto_now_timestamp
from askbot.utils.markup import URL_RE
from askbot.utils.slug import slugify
from askbot.utils.html import replace_links_with_text
from askbot.utils.html import sanitize_html
from askbot.utils.html import site_url
from askbot.utils.diff import textDiff as htmldiff
from askbot.utils.url_utils import strip_path
from askbot import mail
from askbot.models import signals
from django import VERSION
#stores the 1.X version not the security release numbers
DJANGO_VERSION = VERSION[:2]
if DJANGO_VERSION > (1, 3):
from askbot.models.message import Message
else:
from django.contrib.messages.models import Message
def get_model(model_name):
    """a shortcut for getting model for an askbot app

    ``model_name`` is the class name of a model declared in the
    ``askbot`` application, e.g. ``'Post'``.
    """
    return models.get_model('askbot', model_name)
def get_admin():
    """returns admin with the lowest user ID

    if there are no users at all - creates one
    with username "_admin_" and an unusable password
    and the "administrator" ('d') status

    otherwise raises User.DoesNotExist
    """
    try:
        # the earliest-created superuser is considered "the" admin
        return User.objects.filter(
                        is_superuser=True
                    ).order_by('id')[0]
    except IndexError:
        if User.objects.filter(username='_admin_').count() == 0:
            admin = User.objects.create_user('_admin_', '')
            admin.set_unusable_password()
            admin.set_status('d')
            return admin
        else:
            # '_admin_' exists but is not a superuser - give up
            raise User.DoesNotExist
def get_users_by_text_query(search_query, users_query_set = None):
    """Runs text search in user names and profile.
    For postgres, search also runs against user group names.

    ``users_query_set`` optionally restricts the search to a
    pre-filtered queryset; it is ignored when Haystack search is on.
    """
    if getattr(django_settings, 'ENABLE_HAYSTACK_SEARCH', False):
        # delegate to the Haystack full-text backend
        from askbot.search.haystack.searchquery import AskbotSearchQuerySet
        qs = AskbotSearchQuerySet().filter(content=search_query)
        qs = qs.models(User).get_django_queryset(User)
        return qs
    else:
        import askbot
        if users_query_set is None:
            users_query_set = User.objects.all()
        if 'postgresql_psycopg2' in askbot.get_database_engine_name():
            # postgres gets the dedicated full-text search routine
            from askbot.search import postgresql
            return postgresql.run_user_search(users_query_set, search_query)
        else:
            # fallback: simple case-insensitive substring match
            return users_query_set.filter(
                models.Q(username__icontains=search_query) |
                models.Q(about__icontains=search_query)
            )
        #if askbot.get_database_engine_name().endswith('mysql') \
        #    and mysql.supports_full_text_search():
        #    return User.objects.filter(
        #        models.Q(username__search = search_query) |
        #        models.Q(about__search = search_query)
        #    )
class RelatedObjectSimulator(object):
    """Mimics the related-manager API (the "message_set" attribute) that
    django stopped creating automatically for the user/Message pair in
    django 1.4.1.
    """

    def __init__(self, user, model_class):
        self.user = user
        self.model_class = model_class

    def all(self):
        #NB: unlike a real related manager, this is NOT scoped to self.user
        return self.model_class.objects.all()

    def count(self, **kwargs):
        #scope the count to this user's records
        lookup = dict(kwargs, user=self.user)
        return self.model_class.objects.filter(**lookup).count()

    def create(self, **kwargs):
        #new records are always attached to this user
        return self.model_class.objects.create(user=self.user, **kwargs)

    def filter(self, *args, **kwargs):
        #plain pass-through, again without user scoping
        return self.model_class.objects.filter(*args, **kwargs)
#django 1.4.1 and above
@property
def user_message_set(self):
    #replacement for the auto-created related manager dropped in django 1.4.1
    return RelatedObjectSimulator(self, Message)
#django 1.4.1 and above
def user_get_and_delete_messages(self):
    """Return the text of every message addressed to this user,
    deleting each message record along the way."""
    collected_texts = []
    for record in Message.objects.filter(user=self):
        collected_texts.append(record.message)
        record.delete()
    return collected_texts
#---------------------------------------------------------------------------
#Monkey-patch django's User model with askbot-specific fields.
#---------------------------------------------------------------------------
if DJANGO_VERSION > (1, 3):
    #django >= 1.4 removed the user messages facility; reattach a replacement
    User.add_to_class('message_set', user_message_set)
    User.add_to_class('get_and_delete_messages', user_get_and_delete_messages)
#moderation status code of the account (see const.USER_STATUS_CHOICES)
User.add_to_class(
    'status',
    models.CharField(
        max_length = 2,
        default = const.DEFAULT_USER_STATUS,
        choices = const.USER_STATUS_CHOICES
    )
)
#fake accounts are created by admins for posting on behalf of someone else
User.add_to_class('is_fake', models.BooleanField(default=False))
User.add_to_class('email_isvalid', models.BooleanField(default=False)) #@UndefinedVariable
#token used by the email validation procedure (inferred from name -- confirm)
User.add_to_class('email_key', models.CharField(max_length=32, null=True))
#hardcoded initial reputation of 1, no setting for this one
User.add_to_class('reputation',
    models.PositiveIntegerField(default=const.MIN_REPUTATION)
)
#hash used to build the gravatar url (presumably of the email -- confirm)
User.add_to_class('gravatar', models.CharField(max_length=32))
#User.add_to_class('has_custom_avatar', models.BooleanField(default=False))
#avatar source: 'n' - none, 'a' - uploaded, 'g' - gravatar (cf. _check_gravatar)
User.add_to_class(
    'avatar_type',
    models.CharField(max_length=1,
        choices=const.AVATAR_STATUS_CHOICE,
        default='n')
)
#badge counters
User.add_to_class('gold', models.SmallIntegerField(default=0))
User.add_to_class('silver', models.SmallIntegerField(default=0))
User.add_to_class('bronze', models.SmallIntegerField(default=0))
User.add_to_class(
    'questions_per_page',  # TODO: remove me and const.QUESTIONS_PER_PAGE_USER_CHOICES, we're no longer used!
    models.SmallIntegerField(
        choices=const.QUESTIONS_PER_PAGE_USER_CHOICES,
        default=10
    )
)
User.add_to_class('last_seen',
                  models.DateTimeField(default=datetime.datetime.now))
#profile fields
User.add_to_class('real_name', models.CharField(max_length=100, blank=True))
User.add_to_class('website', models.URLField(max_length=200, blank=True))
#location field is actually city
User.add_to_class('location', models.CharField(max_length=100, blank=True))
User.add_to_class('country', CountryField(blank = True))
User.add_to_class('show_country', models.BooleanField(default = False))
User.add_to_class('date_of_birth', models.DateField(null=True, blank=True))
User.add_to_class('about', models.TextField(blank=True))
#interesting tags and ignored tags are to store wildcard tag selections only
User.add_to_class('interesting_tags', models.TextField(blank = True))
User.add_to_class('ignored_tags', models.TextField(blank = True))
User.add_to_class('subscribed_tags', models.TextField(blank = True))
User.add_to_class('email_signature', models.TextField(blank = True))
User.add_to_class('show_marked_tags', models.BooleanField(default = True))
#tag-based filtering strategy for email alerts
User.add_to_class(
    'email_tag_filter_strategy',
    models.SmallIntegerField(
        choices=const.TAG_EMAIL_FILTER_FULL_STRATEGY_CHOICES,
        default=const.EXCLUDE_IGNORED
    )
)
#tag-based filtering strategy for on-screen question lists
User.add_to_class(
    'display_tag_filter_strategy',
    models.SmallIntegerField(
        choices=const.TAG_DISPLAY_FILTER_STRATEGY_CHOICES,
        default=const.INCLUDE_ALL
    )
)
#inbox response counters
User.add_to_class('new_response_count', models.IntegerField(default=0))
User.add_to_class('seen_response_count', models.IntegerField(default=0))
User.add_to_class('consecutive_days_visit_count', models.IntegerField(default = 0))
#list of languages for which user should receive email alerts
User.add_to_class(
    'languages',
    models.CharField(max_length=128, default=django_settings.LANGUAGE_CODE)
)
#twitter auto-sharing credentials
User.add_to_class(
    'twitter_access_token',
    models.CharField(max_length=256, default='')
)
User.add_to_class(
    'twitter_handle',
    models.CharField(max_length=32, default='')
)
#what to auto-share on social channels (cf. user_get_social_sharing_mode)
User.add_to_class(
    'social_sharing_mode',
    models.IntegerField(
        default=const.SHARE_NOTHING,
        choices = const.SOCIAL_SHARING_MODE_CHOICES
    )
)
#gravatar url template; s=size in pixels, d=default image type, r=rating
GRAVATAR_TEMPLATE = "%(gravatar_url)s/%(gravatar)s?" + \
    "s=%(size)d&d=%(type)s&r=PG"
def user_get_gravatar_url(self, size):
    """Return the gravatar url for this user at the given pixel size."""
    template_context = {
        'gravatar_url': askbot_settings.GRAVATAR_BASE_URL,
        'gravatar': self.gravatar,
        'type': askbot_settings.GRAVATAR_TYPE,
        'size': size,
    }
    return GRAVATAR_TEMPLATE % template_context
def user_get_default_avatar_url(self, size):
    """Return the url of the sitewide default avatar image.

    ``size`` is unused; kept for interface parity with the other avatar getters.
    """
    return skin_utils.get_media_url(askbot_settings.DEFAULT_AVATAR_URL)
def user_get_avatar_url(self, size=48):
    """returns avatar url - by default - gravatar,
    but if application django-avatar is installed
    it will use avatar provided through that app
    """
    if 'avatar' in django_settings.INSTALLED_APPS:
        if self.avatar_type == 'n':
            #NOTE(review): this import looks unused here -- possibly kept
            #only to verify the app is importable; confirm before removing
            import avatar
            if askbot_settings.ENABLE_GRAVATAR: #avatar.settings.AVATAR_GRAVATAR_BACKUP:
                return self.get_gravatar_url(size)
            else:
                return self.get_default_avatar_url(size)
        elif self.avatar_type == 'a':
            #uploaded avatar: serve via the django-avatar app's view
            kwargs = {'user_id': self.id, 'size': size}
            try:
                return reverse('avatar_render_primary', kwargs = kwargs)
            except NoReverseMatch:
                message = 'Please, make sure that avatar urls are in the urls.py '\
                        'or update your django-avatar app, '\
                        'currently it is impossible to serve avatars.'
                logging.critical(message)
                raise django_exceptions.ImproperlyConfigured(message)
        else:
            return self.get_gravatar_url(size)
    #django-avatar not installed: choose between gravatar and the default image
    if askbot_settings.ENABLE_GRAVATAR:
        return self.get_gravatar_url(size)
    else:
        return self.get_default_avatar_url(size)
def user_get_top_answers_paginator(self, visitor=None):
    """Return a paginator over this user's non-deleted answers that
    are visible to ``visitor``, ordered best-scored then newest first."""
    top_answers = self.posts.get_answers(visitor).filter(
        deleted=False,
        thread__deleted=False
    ).select_related(
        'thread'
    ).order_by('-points', '-added_at')
    return Paginator(top_answers, const.USER_POSTS_PAGE_SIZE)
def user_update_avatar_type(self):
    """Refresh and save the user's ``avatar_type`` field: 'a' when the
    django-avatar app is installed and the user uploaded at least one
    custom avatar, otherwise whatever ``_check_gravatar()`` reports."""
    avatar_app_installed = 'avatar' in django_settings.INSTALLED_APPS
    if avatar_app_installed and self.avatar_set.count() > 0:
        self.avatar_type = 'a'
    else:
        self.avatar_type = _check_gravatar(self.gravatar)
    self.save()
def user_strip_email_signature(self, text):
    """Return ``text`` with any trailing repetitions of this user's
    email signature removed; line endings are normalized to ``\\n``
    first (unless the signature is empty, when text is returned as-is)."""
    signature = self.email_signature
    if signature.strip() == '':
        return text
    #normalize the line endings before matching the signature
    normalized = '\n'.join(text.splitlines())
    while normalized.endswith(signature):
        normalized = normalized[:-len(signature)]
    return normalized
def _check_gravatar(gravatar):
return 'n'
#todo: think of whether we need this and if so
#how to check the avatar type appropriately
gravatar_url = askbot_settings.GRAVATAR_BASE_URL + "/%s?d=404" % gravatar
code = urllib.urlopen(gravatar_url).getcode()
if urllib.urlopen(gravatar_url).getcode() != 404:
return 'g' #gravatar
else:
return 'n' #none
def user_get_old_vote_for_post(self, post):
    """Return this user's existing vote on ``post``, or None when there
    is none.  Raises AssertionError if more than one vote exists,
    which is illegal."""
    try:
        previous_vote = Vote.objects.get(user=self, voted_post=post)
    except Vote.DoesNotExist:
        return None
    except Vote.MultipleObjectsReturned:
        raise AssertionError
    return previous_vote
def user_get_marked_tags(self, reason):
    """Return the queryset of tags the user marked for ``reason`` --
    one of 'good', 'bad' or 'subscribed' -- in the current language."""
    assert(reason in ('good', 'bad', 'subscribed'))
    if reason == 'subscribed' and \
        askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED == False:
        #subscriptions are off sitewide -> nothing to return
        return Tag.objects.none()
    return Tag.objects.filter(
        user_selections__user=self,
        user_selections__reason=reason,
        language_code=get_language()
    )
#maps a tag-mark reason to the User attribute holding wildcard tag selections
MARKED_TAG_PROPERTY_MAP = {
    'good': 'interesting_tags',
    'bad': 'ignored_tags',
    'subscribed': 'subscribed_tags'
}
def user_get_marked_tag_names(self, reason):
    """Return the list of tag names this user marked for ``reason``
    ('good', 'bad' or 'subscribed'), including wildcard selections
    when the wildcard-tags feature is on."""
    if reason == 'subscribed':
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED == False:
            return list()
    names = list(
        self.get_marked_tags(reason).values_list('name', flat = True)
    )
    if askbot_settings.USE_WILDCARD_TAGS:
        #wildcard selections live in a space-separated text attribute
        wildcard_attr = MARKED_TAG_PROPERTY_MAP[reason]
        names.extend(getattr(self, wildcard_attr).split())
    return names
def user_has_affinity_to_question(self, question = None, affinity_type = None):
    """Return True when the user's tag selections (including wildcard
    selections, if enabled) overlap with the question's tags, and
    False otherwise.  ``affinity_type`` is either 'like' or 'dislike'.
    """
    if affinity_type == 'like':
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED:
            tag_selection_type = 'subscribed'
            wildcards = self.subscribed_tags.split()
        else:
            tag_selection_type = 'good'
            wildcards = self.interesting_tags.split()
    elif affinity_type == 'dislike':
        tag_selection_type = 'bad'
        wildcards = self.ignored_tags.split()
    else:
        raise ValueError('unexpected affinity type %s' % str(affinity_type))
    question_tags = question.thread.tags.all()
    intersecting_tag_selections = self.tag_selections.filter(
                                    tag__in = question_tags,
                                    reason = tag_selection_type
                                )
    #count number of overlapping tags
    if intersecting_tag_selections.count() > 0:
        return True
    elif askbot_settings.USE_WILDCARD_TAGS == False:
        return False
    #match question tags against wildcards
    for tag in question_tags:
        for wildcard in wildcards:
            #wildcards end with '*'; strip it and prefix-match the tag name
            if tag.name.startswith(wildcard[:-1]):
                return True
    return False
def user_has_ignored_wildcard_tags(self):
    """True when the wildcard tags feature is on and the user
    has some ignored wildcard tag selections."""
    wildcards_enabled = askbot_settings.USE_WILDCARD_TAGS
    return wildcards_enabled and self.ignored_tags != ''
def user_has_interesting_wildcard_tags(self):
    """True when the wildcard tags feature is on and the user
    has some interesting wildcard tag selections."""
    wildcards_enabled = askbot_settings.USE_WILDCARD_TAGS
    return wildcards_enabled and self.interesting_tags != ''
def user_has_badge(self, badge):
    """True if the user was awarded ``badge``
    (an instance of BadgeData)."""
    awards = Award.objects.filter(user=self, badge=badge)
    return awards.count() > 0
def user_can_create_tags(self):
    """True if the user may create new tags; when tag moderation
    is on only admins and moderators may."""
    if not askbot_settings.ENABLE_TAG_MODERATION:
        return True
    return self.is_administrator_or_moderator()
def user_can_have_strong_url(self):
    """True if user's homepage url can be
    followed by the search engine crawlers
    (i.e. high enough karma -- presumably rendered
    without rel="nofollow"; confirm in the templates)
    """
    return (self.reputation >= askbot_settings.MIN_REP_TO_HAVE_STRONG_URL)
def user_can_post_by_email(self):
    """True when the reply-by-email feature is enabled and the user
    is staff or has sufficient karma."""
    if not askbot_settings.REPLY_BY_EMAIL:
        return False
    if self.is_administrator_or_moderator():
        return True
    return self.reputation >= askbot_settings.MIN_REP_TO_POST_BY_EMAIL
def user_get_social_sharing_mode(self):
    """Return the slug of the user's auto-sharing preference:
    'share-nothing', 'share-my-posts' or 'share-everything'."""
    mode = self.social_sharing_mode
    if mode == const.SHARE_NOTHING:
        return 'share-nothing'
    if mode == const.SHARE_MY_POSTS:
        return 'share-my-posts'
    #any other stored value is a programming error
    assert(mode == const.SHARE_EVERYTHING)
    return 'share-everything'
def user_get_social_sharing_status(self, channel):
    """Return 'enabled', 'inactive' or 'disabled' for the given
    sharing channel -- only 'twitter' is supported for now."""
    assert(channel == 'twitter')
    if not self.twitter_handle:
        #no twitter account connected at all
        return 'disabled'
    if self.get_social_sharing_mode() == 'share-nothing':
        #account connected, but sharing switched off
        return 'inactive'
    return 'enabled'
def user_get_or_create_fake_user(self, username, email):
    """Get or create a fake user account, most likely to post under
    that account.  Only administrators may do this."""
    assert(self.is_administrator())
    try:
        account = User.objects.get(username=username)
    except User.DoesNotExist:
        #no such account yet -- make one with an unusable password
        account = User()
        account.username = username
        account.email = email
        account.is_fake = True
        account.set_unusable_password()
        account.save()
    return account
def get_or_create_anonymous_user():
    """Return the sitewide fake "anonymous" user account,
    creating it on first use."""
    anon_name = get_name_of_anonymous_user()
    try:
        anon = User.objects.get(username=anon_name)
    except User.DoesNotExist:
        #first use -- create the account with an unusable password
        anon = User()
        anon.username = anon_name
        anon.email = askbot_settings.ANONYMOUS_USER_EMAIL
        anon.is_fake = True
        anon.set_unusable_password()
        anon.save()
    return anon
def user_needs_moderation(self):
    """True when the user's posts require moderation: the account
    status is not one of the exempt codes and sitewide content
    moderation ('audit' or 'premoderation') is turned on."""
    if self.status in ('a', 'm', 'd'):
        #these account status codes are exempt from moderation
        return False
    moderated_modes = ('audit', 'premoderation')
    return askbot_settings.CONTENT_MODERATION_MODE in moderated_modes
def user_notify_users(
    self, notification_type=None, recipients=None, content_object=None
):
    """Create an :class:`Activity` record and add recipients to it.

    * ``notification_type`` - one of the TYPE_ACTIVITY_... constants
    * ``recipients`` - an iterable of user objects
    * ``content_object`` - any object the notification refers to
    todo: possibly add checks on the content_object, depending on the
    notification_type
    """
    notification = Activity(
        user=self,
        activity_type=notification_type,
        content_object=content_object
    )
    notification.save()
    notification.add_recipients(recipients)
def user_is_read_only(self):
    """True when the groups feature is enabled and the user belongs
    to at least one read-only group (i.e. may not change content)."""
    if not askbot_settings.GROUPS_ENABLED:
        return False
    read_only_memberships = self.get_groups().filter(read_only=True)
    return bool(read_only_memberships.count())
def user_get_notifications(self, notification_types=None, **kwargs):
    """returns query set of activity audit status objects
    filtered to the given activity types; extra filter arguments
    are passed through to the queryset
    """
    #NOTE(review): the default notification_types=None would filter on
    #activity_type__in=None -- callers apparently always pass a value; confirm
    return ActivityAuditStatus.objects.filter(
        user=self,
        activity__activity_type__in=notification_types,
        **kwargs
    )
def _assert_user_can(
        user=None,
        post=None, #related post (may be parent)
        admin_or_moderator_required=False,
        owner_can=False,
        action_display=None,
        suspended_owner_cannot=False,
        suspended_user_cannot=False,
        blocked_user_cannot=False,
        min_rep_setting=None
    ):
    """generic helper assert for use in several
    User.assert_can_XYZ() calls regarding changing content

    ``user`` is required; the remaining flags select which rules apply.
    If the assertion fails the method raises exceptions.PermissionDenied
    (or askbot's InsufficientReputation) with appropriate text as a payload;
    if it passes the method simply returns.
    """
    action_display = action_display or _('perform this action')
    #read-only group members are rejected outright
    if askbot_settings.GROUPS_ENABLED:
        if user.is_read_only():
            message = _('Sorry, but you have only read access')
            raise django_exceptions.PermissionDenied(message)
    #the branches below either return (permission granted), raise,
    #or set error_message which is raised at the very end
    if blocked_user_cannot and user.is_blocked():
        error_message = _(message_keys.ACCOUNT_CANNOT_PERFORM_ACTION) % {
            'perform_action': action_display,
            'your_account_is': _('your account is blocked')
        }
        error_message = string_concat(error_message, '.</br> ', message_keys.PUNISHED_USER_INFO)
    elif post and owner_can and user == post.get_owner():
        #post owners are allowed, unless suspended and suspension matters here
        if user.is_suspended() and suspended_owner_cannot:
            error_message = _(message_keys.ACCOUNT_CANNOT_PERFORM_ACTION) % {
                'perform_action': action_display,
                'your_account_is': _('your account is suspended')
            }
        else:
            return
    elif suspended_user_cannot and user.is_suspended():
        error_message = _(message_keys.ACCOUNT_CANNOT_PERFORM_ACTION) % {
            'perform_action': action_display,
            'your_account_is': _('your account is suspended')
        }
    elif user.is_administrator() or user.is_moderator():
        return
    elif user.is_post_moderator(post):
        return
    elif min_rep_setting and user.reputation < min_rep_setting:
        raise askbot_exceptions.InsufficientReputation(
            _(message_keys.MIN_REP_REQUIRED_TO_PERFORM_ACTION) % {
                'perform_action': action_display,
                'min_rep': min_rep_setting
            }
        )
    elif admin_or_moderator_required:
        if min_rep_setting is None:
            #message about admins only
            error_message = _(
                'Sorry, only moderators and site administrators can %(perform_action)s'
            ) % {
                'perform_action': action_display
            }
        else:
            #message with minimum reputation
            error_message = _(
                'Sorry, only administrators, moderators '
                'or users with reputation > %(min_rep)s '
                'can %(perform_action)s'
            ) % {
                'min_rep': min_rep_setting,
                'perform_action': action_display
            }
    else:
        #no restriction matched -> permission granted
        return
    assert(error_message is not None)
    raise django_exceptions.PermissionDenied(error_message)
def user_assert_can_approve_post_revision(self, post_revision = None):
    #only administrators and moderators may approve queued post revisions;
    #post_revision itself is not inspected
    _assert_user_can(
        user=self,
        admin_or_moderator_required=True
    )
def user_assert_can_unaccept_best_answer(self, answer = None):
    """Raise exceptions.PermissionDenied unless this user may
    un-accept ``answer`` (the same rules are reused by the accept
    assertion below).
    """
    assert getattr(answer, 'post_type', '') == 'answer'
    suspended_error_message = _(message_keys.ACCOUNT_CANNOT_PERFORM_ACTION) % {
        'perform_action': askbot_settings.WORDS_ACCEPT_OR_UNACCEPT_THE_BEST_ANSWER,
        'your_account_is': _('your account is suspended')
    }
    blocked_error_message = _(message_keys.ACCOUNT_CANNOT_PERFORM_ACTION) % {
        'perform_action': askbot_settings.WORDS_ACCEPT_OR_UNACCEPT_THE_BEST_ANSWER,
        'your_account_is': _('your account is blocked')
    }
    if self.is_blocked():
        error_message = blocked_error_message
    elif self.is_suspended():
        error_message = suspended_error_message
    elif self == answer.thread._question_post().get_owner():
        #question owner: may handle own answers given enough karma,
        #others' answers only after a staff waiting period
        if self == answer.get_owner():
            if not self.is_administrator():
                #check rep
                _assert_user_can(
                    user=self,
                    action_display=askbot_settings.WORDS_ACCEPT_OR_UNACCEPT_OWN_ANSWER,
                    blocked_user_cannot=True,
                    suspended_owner_cannot=True,
                    min_rep_setting = askbot_settings.MIN_REP_TO_ACCEPT_OWN_ANSWER
                )
            return # success
        elif self.reputation >= askbot_settings.MIN_REP_TO_ACCEPT_ANY_ANSWER or \
            self.is_administrator() or self.is_moderator() or self.is_post_moderator(answer):
            will_be_able_at = (
                answer.added_at +
                datetime.timedelta(
                    days=askbot_settings.MIN_DAYS_FOR_STAFF_TO_ACCEPT_ANSWER)
            )
            if datetime.datetime.now() < will_be_able_at:
                error_message = _(message_keys.CANNOT_PERFORM_ACTION_UNTIL) % {
                    'perform_action': askbot_settings.WORDS_ACCEPT_OR_UNACCEPT_OWN_ANSWER,
                    'until': will_be_able_at.strftime('%d/%m/%Y')
                }
            else:
                return
        else:
            #NOTE(review): question_owner below is never used -- candidate for removal
            question_owner = answer.thread._question_post().get_owner()
            error_message = _(message_keys.MODERATORS_OR_AUTHOR_CAN_PEFROM_ACTION) % {
                'post_author': askbot_settings.WORDS_AUTHOR_OF_THE_QUESTION,
                'perform_action': askbot_settings.WORDS_ACCEPT_OR_UNACCEPT_THE_BEST_ANSWER,
            }
    raise django_exceptions.PermissionDenied(error_message)
def user_assert_can_accept_best_answer(self, answer = None):
    #accepting follows exactly the same rules as un-accepting
    assert getattr(answer, 'post_type', '') == 'answer'
    self.assert_can_unaccept_best_answer(answer)
def user_assert_can_vote_for_post(
    self,
    post = None,
    direction = None,
):
    """raises exceptions.PermissionDenied exception
    if user can't in fact cast this vote
    :param:direction can be 'up' or 'down'
    :param:post can be instance of question or answer
    """
    #self-voting is never allowed
    if self == post.author:
        raise django_exceptions.PermissionDenied(
            _('Sorry, you cannot vote for your own posts')
        )
    assert(direction in ('up', 'down'))
    if direction == 'up':
        rep_floor = askbot_settings.MIN_REP_TO_VOTE_UP
        verb = _('upvote')
    else:
        rep_floor = askbot_settings.MIN_REP_TO_VOTE_DOWN
        verb = _('downvote')
    _assert_user_can(
        user=self,
        action_display=verb,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=rep_floor,
    )
def user_assert_can_upload_file(request_user):
    #note: the first positional argument plays the role of "self"
    #once this function is attached to the User class
    _assert_user_can(
        user=request_user,
        action_display=_('upload files'),
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=askbot_settings.MIN_REP_TO_UPLOAD_FILES
    )
def user_assert_can_merge_questions(self):
    #merging duplicate questions is a staff-only operation
    _assert_user_can(
        user=self,
        action_display=_('merge duplicate questions'),
        admin_or_moderator_required=True
    )
def user_assert_can_post_text(self, text):
    """Raises exceptions.PermissionDenied, if user does not have
    privilege to post given text, depending on the contents
    (currently: posting links requires a minimum karma).
    """
    if not re.search(URL_RE, text):
        return
    karma_floor = askbot_settings.MIN_REP_TO_SUGGEST_LINK
    if self.is_authenticated() and self.reputation < karma_floor:
        message = _(
            'Could not post, because your karma is insufficient to publish links'
        )
        raise django_exceptions.PermissionDenied(message)
def user_assert_can_post_question(self):
    """raises exceptions.PermissionDenied with
    text that has the reason for the denial
    (blocked and suspended users may not ask questions)
    """
    _assert_user_can(
        user=self,
        action_display=askbot_settings.WORDS_ASK_QUESTIONS,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
    )
def user_assert_can_post_answer(self, thread = None):
    """Same privilege rules as for posting a question, plus an
    optional one-answer-per-user limit on the thread."""
    one_answer_only = askbot_settings.LIMIT_ONE_ANSWER_PER_USER
    if one_answer_only and thread.has_answer_by_user(self):
        message = _(
            'Sorry, %(you_already_gave_an_answer)s, please edit it instead.'
        ) % {
            'you_already_gave_an_answer': askbot_settings.WORDS_YOU_ALREADY_GAVE_AN_ANSWER
        }
        raise askbot_exceptions.AnswerAlreadyGiven(message)
    self.assert_can_post_question()
def user_assert_can_edit_comment(self, comment = None):
    """raises exceptions.PermissionDenied if user
    cannot edit comment with the reason given as message

    only owners, moderators or admins can edit comments;
    owners may additionally be constrained by an edit time limit
    (the last comment in a discussion is exempt from the limit)
    """
    if self.is_administrator() or self.is_moderator():
        return
    else:
        if comment.author == self:
            if askbot_settings.USE_TIME_LIMIT_TO_EDIT_COMMENT:
                now = datetime.datetime.now()
                delta_seconds = 60 * askbot_settings.MINUTES_TO_EDIT_COMMENT
                if now - comment.added_at > datetime.timedelta(0, delta_seconds):
                    #edit window elapsed -- only the very last comment may still be edited
                    if comment.is_last():
                        return
                    error_message = ungettext(
                        'Sorry, comments (except the last one) are editable only '
                        'within %(minutes)s minute from posting',
                        'Sorry, comments (except the last one) are editable only '
                        'within %(minutes)s minutes from posting',
                        askbot_settings.MINUTES_TO_EDIT_COMMENT
                    ) % {'minutes': askbot_settings.MINUTES_TO_EDIT_COMMENT}
                    raise django_exceptions.PermissionDenied(error_message)
                #still within the edit window
                return
            else:
                #no time limit configured -- owner may always edit
                return
    #not the owner and not staff: allow only with sufficient karma
    if not (self.is_blocked() or self.is_suspended()):
        if self.reputation >= askbot_settings.MIN_REP_TO_EDIT_OTHERS_POSTS:
            return
    error_message = _(
        'Sorry, but only post owners or moderators can edit comments'
    )
    raise django_exceptions.PermissionDenied(error_message)
def user_assert_can_convert_post(self, post = None):
    """Raise exceptions.PermissionDenied unless the user may convert
    ``post`` to another type (comment -> answer, answer -> comment);
    only post owners, moderators and admins may convert."""
    allowed = self.is_administrator() \
        or self.is_moderator() \
        or post.author == self
    if allowed:
        return
    error_message = _(
        'Sorry, but only post owners or moderators convert posts'
    )
    raise django_exceptions.PermissionDenied(error_message)
def user_can_post_comment(self, parent_post = None):
    """Lightweight True/False check (no exception raised) of whether
    this user may comment under ``parent_post``."""
    if self.is_administrator_or_moderator():
        return True
    if self.is_suspended():
        #suspended users may comment only under their own posts
        return bool(parent_post and self == parent_post.author)
    if self.is_blocked():
        return False
    return True
def user_assert_can_post_comment(self, parent_post = None):
    """raises exceptions.PermissionDenied if
    user cannot post comment
    the reason will be in text of exception
    """
    #owner_can=True lets suspended users still comment under their own posts
    _assert_user_can(
        user=self,
        post=parent_post,
        action_display=_('post comments'),
        owner_can=True,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
    )
def user_assert_can_see_deleted_post(self, post=None):
    """Raise exceptions.PermissionDenied unless this user may view the
    deleted ``post`` -- only post owners, admins and moderators may.

    attn: this assertion is independently coded in
    Question.get_answers call
    """
    try:
        _assert_user_can(
            user=self,
            post=post,
            admin_or_moderator_required=True,
            owner_can=True
        )
    except django_exceptions.PermissionDenied:
        #re-raise the same exception type with a message specific to
        #deleted posts; the caught exception object was never used, and
        #the old "except X, e" spelling is not python 3 compatible
        error_message = _(
            'This post has been deleted and can be seen only '
            'by post owners, site administrators and moderators'
        )
        raise django_exceptions.PermissionDenied(error_message)
def user_assert_can_edit_deleted_post(self, post = None):
    """Raise exceptions.PermissionDenied unless this user may edit the
    deleted ``post``; delegates to the view assertion and rewords the
    denial message."""
    assert(post.deleted == True)
    try:
        self.assert_can_see_deleted_post(post)
    except django_exceptions.PermissionDenied:
        #the caught exception object was unused; dropped the
        #python-3-incompatible ", e" binding
        error_message = _(
            'Sorry, only moderators, site administrators '
            'and post owners can edit deleted posts'
        )
        raise django_exceptions.PermissionDenied(error_message)
def user_assert_can_edit_post(self, post = None):
    """assertion that raises exceptions.PermissionDenied
    when user is not authorised to edit this post;
    deleted posts follow their own rule set
    """
    if post.deleted == True:
        self.assert_can_edit_deleted_post(post)
        return
    if post.wiki == True:
        verb = _('edit wiki posts')
        rep_floor = askbot_settings.MIN_REP_TO_EDIT_WIKI
    else:
        verb = _('edit posts')
        rep_floor = askbot_settings.MIN_REP_TO_EDIT_OTHERS_POSTS
    _assert_user_can(
        user=self,
        post=post,
        action_display=verb,
        owner_can=True,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=rep_floor
    )
def user_assert_can_edit_question(self, question = None):
    #generic post-edit rules apply; just guard the post type first
    assert getattr(question, 'post_type', '') == 'question'
    self.assert_can_edit_post(question)
def user_assert_can_edit_answer(self, answer = None):
    #generic post-edit rules apply; just guard the post type first
    assert getattr(answer, 'post_type', '') == 'answer'
    self.assert_can_edit_post(answer)
def user_assert_can_delete_post(self, post = None):
    """Dispatch to the type-specific deletion assertion for ``post``
    (question, answer or comment); raises ValueError for any other
    post type."""
    #maps post_type -> (assertion method name, its keyword argument name)
    dispatch = {
        'question': ('assert_can_delete_question', 'question'),
        'answer': ('assert_can_delete_answer', 'answer'),
        'comment': ('assert_can_delete_comment', 'comment'),
    }
    post_type = getattr(post, 'post_type', '')
    if post_type not in dispatch:
        raise ValueError('Invalid post_type!')
    method_name, kwarg_name = dispatch[post_type]
    getattr(self, method_name)(**{kwarg_name: post})
def user_assert_can_restore_post(self, post = None):
    """can_restore_rule is the same as can_delete
    """
    self.assert_can_delete_post(post = post)
def user_assert_can_delete_question(self, question = None):
    """rules are the same as to delete answer,
    except if question has upvoted answers by others already, when the
    owner cannot delete unless s/he is an administrator or moderator
    """
    #cheating here. can_delete_answer wants argument named
    #"question", so the argument name is skipped
    self.assert_can_delete_answer(question)
    if self == question.get_owner():
        #if there are answers by other people,
        #then deny, unless user in admin or moderator
        #(only answers with points > 0 are counted)
        answer_count = question.thread.all_answers()\
                        .exclude(author=self).exclude(points__lte=0).count()
        if answer_count > 0:
            if self.is_administrator() or self.is_moderator():
                return
            else:
                if answer_count > 1:
                    upvoted_answers_phrase = askbot_settings.WORDS_UPVOTED_ANSWERS
                else:
                    upvoted_answers_phrase = askbot_settings.WORDS_UPVOTED_ANSWER
                msg = ungettext(
                    'Sorry, cannot %(delete_your_question)s since it '
                    'has an %(upvoted_answers)s posted by someone else',
                    'Sorry, cannot %(delete_your_question)s since it '
                    'has some %(upvoted_answers)s posted by other users',
                    answer_count
                ) % {
                    'delete_your_question': askbot_settings.WORDS_DELETE_YOUR_QUESTION,
                    'upvoted_answers': upvoted_answers_phrase
                }
                raise django_exceptions.PermissionDenied(msg)
def user_assert_can_delete_answer(self, answer = None):
    """intentionally use "post" word in the messages
    instead of "answer", because this logic also applies to
    assert on deleting question (in addition to some special rules)
    """
    _assert_user_can(
        user=self,
        post=answer,
        action_display=_('delete posts'),
        owner_can=True,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=askbot_settings.MIN_REP_TO_DELETE_OTHERS_POSTS,
    )
def user_assert_can_close_question(self, question = None):
    """Raise exceptions.PermissionDenied when the user may not close
    ``question``; non-suspended owners and high-karma users may."""
    assert(getattr(question, 'post_type', '') == 'question')
    _assert_user_can(
        user=self,
        post=question,
        action_display=askbot_settings.WORDS_CLOSE_QUESTIONS,
        owner_can=True,
        suspended_owner_cannot=True,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=askbot_settings.MIN_REP_TO_CLOSE_OTHERS_QUESTIONS,
    )
def user_assert_can_reopen_question(self, question = None):
    """Raise exceptions.PermissionDenied when the user may not reopen
    ``question``."""
    assert(question.post_type == 'question')
    _assert_user_can(
        user=self,
        post=question,
        action_display=_('reopen questions'),
        suspended_owner_cannot=True,
        #for some reason rep to reopen own questions != rep to close own q's
        min_rep_setting=askbot_settings.MIN_REP_TO_CLOSE_OTHERS_QUESTIONS,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
    )
def user_assert_can_flag_offensive(self, post = None):
    """Raise an exception unless this user may flag ``post`` offensive:
    duplicate flags raise DuplicateCommand, karma/status failures raise
    PermissionDenied, and non-staff users are also subject to a daily
    flag quota.
    """
    assert(post is not None)
    double_flagging_error_message = _(
        'You have flagged this post before and '
        'cannot do it more than once'
    )
    #no double flagging of the same post
    if self.get_flags_for_post(post).count() > 0:
        raise askbot_exceptions.DuplicateCommand(double_flagging_error_message)
    min_rep_setting = askbot_settings.MIN_REP_TO_FLAG_OFFENSIVE
    _assert_user_can(
        user = self,
        post = post,
        action_display=_('flag posts as offensive'),
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting = min_rep_setting
    )
    #one extra assertion: daily flag quota, waived for staff
    if self.is_administrator() or self.is_moderator():
        return
    else:
        flag_count_today = self.get_flag_count_posted_today()
        if flag_count_today >= askbot_settings.MAX_FLAGS_PER_USER_PER_DAY:
            flags_exceeded_error_message = _(
                'Sorry, you have exhausted the maximum number of '
                '%(max_flags_per_day)s offensive flags per day.'
            ) % {
                'max_flags_per_day': \
                    askbot_settings.MAX_FLAGS_PER_USER_PER_DAY
            }
            raise django_exceptions.PermissionDenied(flags_exceeded_error_message)
def user_assert_can_remove_flag_offensive(self, post = None):
    """Raise exceptions.PermissionDenied unless this user may remove
    his/her own offensive flag from ``post`` (the flag must exist and
    the karma/status rules must pass).
    """
    assert(post is not None)
    non_existing_flagging_error_message = _('cannot remove non-existing flag')
    if self.get_flags_for_post(post).count() < 1:
        raise django_exceptions.PermissionDenied(non_existing_flagging_error_message)
    min_rep_setting = askbot_settings.MIN_REP_TO_FLAG_OFFENSIVE
    _assert_user_can(
        user = self,
        post = post,
        action_display=_('remove flags'),
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting = min_rep_setting
    )
    #one extra assertion
    #NOTE(review): this trailing check is a no-op (the function returns
    #either way) -- possibly a leftover from copying the flagging assertion
    if self.is_administrator() or self.is_moderator():
        return
def user_assert_can_remove_all_flags_offensive(self, post = None):
    """Only administrators and moderators may clear all offensive flags
    from ``post``; raises exceptions.PermissionDenied otherwise, and
    also when the post carries no flags at all."""
    assert(post is not None)
    #first make sure that somebody actually flagged this post
    all_flags = Activity.objects.filter(
        activity_type = const.TYPE_ACTIVITY_MARK_OFFENSIVE,
        content_type = ContentType.objects.get_for_model(post),
        object_id=post.id
    )
    if all_flags.count() < 1:
        raise django_exceptions.PermissionDenied(
            _('no flags for this entry')
        )
    if self.is_administrator() or self.is_moderator():
        return
    raise django_exceptions.PermissionDenied(
        _("you don't have the permission to remove all flags")
    )
def user_assert_can_retag_question(self, question = None):
    """Raise exceptions.PermissionDenied when the user may not retag
    ``question``; deleted questions additionally follow the
    deleted-post edit rules."""
    if question.deleted == True:
        self.assert_can_edit_deleted_post(question)
    _assert_user_can(
        user=self,
        post=question,
        owner_can=True,
        action_display=askbot_settings.WORDS_RETAG_QUESTIONS,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=askbot_settings.MIN_REP_TO_RETAG_OTHERS_QUESTIONS
    )
def user_assert_can_delete_comment(self, comment = None):
    """Raise exceptions.PermissionDenied when the user may not delete
    ``comment``; owners and sufficiently reputable users may."""
    _assert_user_can(
        user=self,
        post=comment,
        action_display=_('delete comments'),
        owner_can=True,
        blocked_user_cannot=True,
        suspended_user_cannot=True,
        min_rep_setting=askbot_settings.MIN_REP_TO_DELETE_OTHERS_COMMENTS,
    )
def user_assert_can_revoke_old_vote(self, vote):
    """raises exceptions.PermissionDenied if old vote
    cannot be revoked due to age of the vote

    Bug fix: the previous check subtracted day-of-month numbers
    (``now().day - vote.voted_at.day``), which breaks across month
    boundaries (e.g. a vote cast on the 30th checked on the 1st gave a
    negative "age"). Use real timedelta arithmetic instead.
    """
    vote_age = datetime.datetime.now() - vote.voted_at
    if vote_age.days >= askbot_settings.MAX_DAYS_TO_CANCEL_VOTE:
        raise django_exceptions.PermissionDenied(
            _('sorry, but older votes cannot be revoked')
        )
def user_get_unused_votes_today(self):
    """Return the number of votes this user may still cast today
    (never negative)."""
    today = datetime.date.today()
    votes_spent = Vote.objects.filter(
        user = self,
        voted_at__range = (today, today + datetime.timedelta(1))
    ).count()
    votes_left = askbot_settings.MAX_VOTES_PER_USER_PER_DAY - votes_spent
    return max(0, votes_left)
def user_post_comment(
    self,
    parent_post=None,
    body_text=None,
    timestamp=None,
    by_email=False,
    ip_addr=None,
):
    """post a comment on behalf of the user
    to parent_post

    Returns the new comment post.  Side effects: adds the comment to the
    author's personal group, invalidates the thread cache and fires the
    badge-award signal.  Raises exceptions.PermissionDenied (via the
    assertion) or ValueError on missing arguments.
    """
    if body_text is None:
        raise ValueError('body_text is required to post comment')
    if parent_post is None:
        raise ValueError('parent_post is required to post comment')
    if timestamp is None:
        timestamp = datetime.datetime.now()
    self.assert_can_post_comment(parent_post = parent_post)
    comment = parent_post.add_comment(
        user=self,
        comment=body_text,
        added_at=timestamp,
        by_email=by_email,
        ip_addr=ip_addr,
    )
    comment.add_to_groups([self.get_personal_group()])
    #cached thread summary is now stale
    parent_post.thread.invalidate_cached_data()
    award_badges_signal.send(
        None,
        event = 'post_comment',
        actor = self,
        context_object = comment,
        timestamp = timestamp
    )
    return comment
def user_post_object_description(
    self,
    obj=None,
    body_text=None,
    timestamp=None
):
    """Create a description post and attach it to ``obj``.

    Returns the newly created post. ``timestamp`` is accepted for
    call-signature consistency and is not used here.
    """
    post = Post.objects.create_new_tag_wiki(author=self, text=body_text)
    obj.description = post
    obj.save()
    return post
def user_post_anonymous_askbot_content(user, session_key):
    """Publish posts made anonymously just before logging in.

    The posts are identified by ``session_key``; this function is
    used by the signal handler with a similar name.
    """
    anon_questions = AnonymousQuestion.objects.filter(session_key=session_key)
    anon_answers = AnonymousAnswer.objects.filter(session_key=session_key)
    if user.get_groups().filter(read_only=True).count():
        user.message_set.create(message=_('Sorry, but you have only read access'))
    #from askbot.conf import settings as askbot_settings
    if askbot_settings.EMAIL_VALIDATION == True:
        #email validation pending: only attach the user to the records
        for anon_question in anon_questions:
            anon_question.author = user
            anon_question.save()
        for anon_answer in anon_answers:
            anon_answer.author = user
            anon_answer.save()
        #maybe add pending posts message?
    else:
        for anon_question in anon_questions:
            anon_question.publish(user)
        for anon_answer in anon_answers:
            anon_answer.publish(user)
def user_mark_tags(
    self,
    tagnames=None,
    wildcards=None,
    reason=None,
    action=None
):
    """subscribe for or ignore a list of tags

    * ``tagnames`` and ``wildcards`` are lists of
      pure tags and wildcard tags, respectively
    * ``reason`` - either "good" or "bad" (or "subscribed" when the
      subscribed tag selector is enabled)
    * ``action`` - either "add" or "remove"

    Returns ``(cleaned_tagnames, cleaned_wildcards)`` - the names
    that were actually updated.
    """
    cleaned_wildcards = list()
    assert action in ('add', 'remove')
    if action == 'add':
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED:
            assert reason in ('good', 'bad', 'subscribed')
        else:
            assert reason in ('good', 'bad')
    if wildcards:
        cleaned_wildcards = self.update_wildcard_tag_selections(
            action=action,
            reason=reason,
            wildcards=wildcards
        )
    if tagnames is None:
        tagnames = list()
    language_code = get_language()
    #figure out which tags don't yet exist
    existing_names = Tag.objects.filter(
        name__in=tagnames,
        language_code=language_code
    ).values_list('name', flat=True)
    missing_names = set(tagnames) - set(existing_names)
    #create those tags, and if tags are moderated make them suggested
    if missing_names:
        Tag.objects.create_in_bulk(
            tag_names=tagnames,
            user=self,
            language_code=language_code
        )
    #below we update normal tag selections
    current_marks = MarkedTag.objects.filter(
        user=self,
        tag__name__in=tagnames,
        tag__language_code=language_code
    )
    #marks for "good" and "bad" reasons are exclusive, so that one
    #cannot "like" and "dislike" a tag at the same time, but the
    #subscribed set is independent - e.g. you can dislike a topic
    #and still subscribe for it
    if reason == 'subscribed':
        #don't touch good/bad marks
        current_marks = current_marks.filter(reason='subscribed')
    else:
        #and in this case don't touch subscribed tags
        current_marks = current_marks.exclude(reason='subscribed')
    #todo: use the user api methods here instead of the straight ORM
    cleaned_tagnames = list()  #those that were actually updated
    if action == 'remove':
        logging.debug('deleting tag marks: %s' % ','.join(tagnames))
        current_marks.delete()
    else:
        marked_names = current_marks.values_list('tag__name', flat=True)
        if len(marked_names) < len(tagnames):
            #some tags are not marked yet - create marks for them
            unmarked_names = set(tagnames).difference(set(marked_names))
            unmarked_tags = Tag.objects.filter(
                name__in=unmarked_names,
                language_code=language_code
            )
            new_marks = list()
            for tag in unmarked_tags:
                MarkedTag(user=self, reason=reason, tag=tag).save()
                new_marks.append(tag.name)
            cleaned_tagnames.extend(marked_names)
            cleaned_tagnames.extend(new_marks)
        else:
            #to maintain exclusivity of 'good' and 'bad'
            if reason in ('good', 'bad'):
                current_marks.update(reason=reason)
            cleaned_tagnames = tagnames
    return cleaned_tagnames, cleaned_wildcards
def user_merge_duplicate_questions(self, from_q, to_q):
    """merges content from the ``from_thread`` to the ``to-thread``"""
    #todo: maybe assertion will depend on which questions are merged
    self.assert_can_merge_questions()
    to_q.merge_post(from_q)
    from_thread = from_q.thread
    to_thread = to_q.thread
    #re-parent all posts of the source thread
    posts = from_thread.posts.all()
    posts.update(thread=to_thread)
    if askbot_settings.LIMIT_ONE_ANSWER_PER_USER:
        #merge answers if only one is allowed per user
        answers = to_thread.all_answers()
        answer_map = collections.defaultdict(list)
        #compile all answers by user
        for answer in answers:
            author = answer.author
            answer_map[author].append(answer)
        for author in answer_map:
            author_answers = answer_map[author]
            #bug fix: the old code compared the list itself to 1
            #(``author_answers > 1``), which is always True in Python 2;
            #compare the number of answers instead
            if len(author_answers) > 1:
                first_answer = author_answers.pop(0)
                for answer in author_answers:
                    first_answer.merge_post(answer)
    #from_thread.spaces.clear()
    from_thread.delete()
    to_thread.invalidate_cached_data()
@auto_now_timestamp
def user_retag_question(
    self,
    question=None,
    tags=None,
    timestamp=None,
    silent=False
):
    """Retag ``question`` on behalf of the user and award badges."""
    self.assert_can_retag_question(question)
    thread = question.thread
    thread.retag(
        retagged_by=self,
        retagged_at=timestamp,
        tagnames=tags,
        silent=silent
    )
    thread.invalidate_cached_data()
    award_badges_signal.send(
        None,
        event='retag_question',
        actor=self,
        context_object=question,
        timestamp=timestamp
    )
def user_repost_comment_as_answer(self, comment):
    """converts comment to answer under the
    parent question"""
    #todo: add assertion
    self.assert_can_convert_post(comment)
    old_parent = comment.parent
    comment.post_type = 'answer'
    comment.parent = comment.thread._question_post()
    comment.save()
    comment.thread.update_answer_count()
    comment.parent.comment_count += 1
    comment.parent.save()
    #clamp at zero to avoid a db constraint error
    old_parent.comment_count = max(old_parent.comment_count - 1, 0)
    old_parent.save()
    comment.thread.invalidate_cached_data()
@auto_now_timestamp
def user_accept_best_answer(
    self, answer=None,
    timestamp=None,
    cancel=False,
    force=False
):
    """Mark ``answer`` as the accepted one, or cancel the acceptance
    when ``cancel`` is True. ``force`` bypasses the permission check."""
    if cancel:
        return self.unaccept_best_answer(
            answer=answer,
            timestamp=timestamp,
            force=force
        )
    if force == False:
        self.assert_can_accept_best_answer(answer)
    if answer.accepted() == True:
        return
    #cancel the previously accepted answer, if there is one
    previously_accepted = answer.thread.accepted_answer
    if previously_accepted:
        auth.onAnswerAcceptCanceled(previously_accepted, self)
    auth.onAnswerAccept(answer, self, timestamp=timestamp)
    award_badges_signal.send(
        None,
        event='accept_best_answer',
        actor=self,
        context_object=answer,
        timestamp=timestamp
    )
@auto_now_timestamp
def user_unaccept_best_answer(
    self, answer=None,
    timestamp=None,
    force=False
):
    """Cancel the accepted status of ``answer``; no-op when it is
    not accepted. ``force`` bypasses the permission check."""
    if force == False:
        self.assert_can_unaccept_best_answer(answer)
    if answer.accepted():
        auth.onAnswerAcceptCanceled(answer, self)
@auto_now_timestamp
def user_delete_comment(
    self,
    comment=None,
    timestamp=None
):
    """Hard-delete ``comment`` after a permission check."""
    self.assert_can_delete_comment(comment=comment)
    #todo: switch to soft deletion:
    #comment.deleted = True
    #comment.deleted_by = self
    #comment.deleted_at = timestamp
    #comment.save()
    thread = comment.thread
    comment.delete()
    thread.invalidate_cached_data()
@auto_now_timestamp
def user_delete_answer(
    self,
    answer=None,
    timestamp=None
):
    """Soft-delete ``answer``, refresh thread state, emit signals."""
    self.assert_can_delete_answer(answer=answer)
    answer.deleted = True
    answer.deleted_by = self
    answer.deleted_at = timestamp
    answer.save()
    thread = answer.thread
    thread.update_answer_count()
    thread.update_last_activity_info()
    thread.invalidate_cached_data()
    logging.debug('updated answer count to %d' % thread.answer_count)
    signals.delete_question_or_answer.send(
        sender=answer.__class__,
        instance=answer,
        delete_by=self
    )
    award_badges_signal.send(
        None,
        event='delete_post',
        actor=self,
        context_object=answer,
        timestamp=timestamp
    )
@auto_now_timestamp
def user_delete_question(
    self,
    question=None,
    timestamp=None
):
    """Soft-delete ``question``, its thread, and now-unused tags."""
    self.assert_can_delete_question(question=question)
    question.deleted = True
    question.deleted_by = self
    question.deleted_at = timestamp
    question.save()
    question.thread.deleted = True
    question.thread.save()
    for tag in list(question.thread.tags.all()):
        if tag.used_count > 1:
            tag.used_count -= 1
        else:
            #last use of this tag - soft-delete it as well
            tag.used_count = 0
            tag.deleted = True
            tag.deleted_by = self
            tag.deleted_at = timestamp
        tag.save()
    signals.delete_question_or_answer.send(
        sender=question.__class__,
        instance=question,
        delete_by=self
    )
    award_badges_signal.send(
        None,
        event='delete_post',
        actor=self,
        context_object=question,
        timestamp=timestamp
    )
def user_delete_all_content_authored_by_user(self, author, timestamp=None):
    """Deletes all questions, answers and comments made by the user.

    Answers and questions are soft-deleted (marked ``deleted``); comments
    are removed for good. Returns the total number of deleted posts.
    """
    count = 0
    #delete answers (bulk update returns the number of affected rows)
    answers = Post.objects.get_answers().filter(author=author)
    timestamp = timestamp or datetime.datetime.now()
    count += answers.update(deleted_at=timestamp, deleted_by=self, deleted=True)
    #delete questions one by one so delete_question() can also
    #update thread and tag state
    questions = Post.objects.get_questions().filter(author=author)
    count += questions.count()
    for question in questions:
        self.delete_question(question=question, timestamp=timestamp)
    #refresh last-activity info on threads the author touched last
    threads = Thread.objects.filter(last_activity_by=author)
    for thread in threads:
        thread.update_last_activity_info()
    #delete threads
    thread_ids = questions.values_list('thread_id', flat=True)
    #load second time b/c threads above are not quite real
    threads = Thread.objects.filter(id__in=thread_ids)
    threads.update(deleted=True)
    for thread in threads:
        thread.invalidate_cached_data()
    #delete comments
    comments = Post.objects.get_comments().filter(author=author)
    count += comments.count()
    comments.delete()
    #delete all unused tags created by this user
    #tags = author.created_tags.all()
    #tag_ids = list()
    #for tag in tags:
    #    if tag.used_count == 0:
    #        tag_ids.append(tag.id)
    #Tag.objects.filter(id__in=tag_ids).delete()
    return count
@auto_now_timestamp
def user_close_question(
    self,
    question=None,
    reason=None,
    timestamp=None
):
    """Close ``question`` with ``reason`` after a permission check."""
    self.assert_can_close_question(question)
    question.thread.set_closed_status(
        closed=True,
        closed_by=self,
        closed_at=timestamp,
        close_reason=reason
    )
@auto_now_timestamp
def user_reopen_question(
    self,
    question=None,
    timestamp=None
):
    """Reopen a previously closed ``question`` after a permission check."""
    self.assert_can_reopen_question(question)
    question.thread.set_closed_status(
        closed=False,
        closed_by=self,
        closed_at=timestamp,
        close_reason=None
    )
@auto_now_timestamp
def user_delete_post(
    self,
    post=None,
    timestamp=None
):
    """generic delete method for all kinds of posts
    if there is no use cases for it, the method will be removed
    """
    post_type = post.post_type
    if post_type == 'question':
        self.delete_question(question=post, timestamp=timestamp)
    elif post_type == 'answer':
        self.delete_answer(answer=post, timestamp=timestamp)
    elif post_type == 'comment':
        self.delete_comment(comment=post, timestamp=timestamp)
    else:
        raise TypeError('either Comment, Question or Answer expected')
    post.thread.invalidate_cached_data()
def user_restore_post(
    self,
    post=None,
    timestamp=None
):
    """Undelete a question or answer post.

    ``timestamp`` is unused and kept for call-signature consistency.
    Raises ``NotImplementedError`` for other post types.
    """
    self.assert_can_restore_post(post)
    if post.post_type not in ('question', 'answer'):
        raise NotImplementedError()
    post.deleted = False
    post.deleted_by = None
    post.deleted_at = None
    post.save()
    if post.post_type == 'question':
        post.thread.deleted = False
        post.thread.save()
        post.thread.invalidate_cached_data()
        #todo: make sure that these tags actually exist
        #some may have since been deleted for good
        #or merged into others
        for tag in list(post.thread.tags.all()):
            if tag.used_count == 1 and tag.deleted:
                tag.deleted = False
                tag.deleted_by = None
                tag.deleted_at = None
                tag.save()
    else:
        post.thread.update_answer_count()
        post.thread.update_last_activity_info()
def user_post_question(
    self,
    title=None,
    body_text='',
    tags=None,
    wiki=False,
    is_anonymous=False,
    is_private=False,
    group_id=None,
    timestamp=None,
    by_email=False,
    email_address=None,
    language=None,
    ip_addr=None,
):
    """makes an assertion whether user can post the question
    then posts it and returns the question object"""
    self.assert_can_post_question()
    if title is None:
        raise ValueError('Title is required to post question')
    if tags is None:
        raise ValueError('Tags are required to post question')
    if body_text == '':#a hack to allow bodyless question
        body_text = ' '
    if timestamp is None:
        timestamp = datetime.datetime.now()
    #todo: split this into "create thread" + "add question", if text exists
    #or maybe just add a blank question post anyway
    thread = Thread.objects.create_new(
        author=self,
        title=title,
        text=body_text,
        tagnames=tags,
        added_at=timestamp,
        wiki=wiki,
        is_anonymous=is_anonymous,
        is_private=is_private,
        group_id=group_id,
        by_email=by_email,
        email_address=email_address,
        language=language,
        ip_addr=ip_addr
    )
    question = thread._question_post()
    if question.author != self:
        raise ValueError('question.author != self')
    #HACK: some tests require question.author to be the very same object
    #as the self-user (an identity map, which Django doesn't provide),
    #because they set attributes on one and expect them on the other
    question.author = self
    if askbot_settings.AUTO_FOLLOW_QUESTION_BY_OP:
        self.toggle_favorite_question(question)
    return question
@auto_now_timestamp
def user_edit_comment(
    self,
    comment_post=None,
    body_text=None,
    timestamp=None,
    by_email=False,
    suppress_email=False,
    ip_addr=None,
):
    """Apply an edit to a comment and return the new revision.

    The method does not change the comment's timestamp and no
    signals are sent.
    todo: see how this can be merged with edit_post
    todo: add timestamp
    """
    self.assert_can_edit_comment(comment_post)
    new_revision = comment_post.apply_edit(
        text=body_text,
        edited_at=timestamp,
        edited_by=self,
        by_email=by_email,
        suppress_email=suppress_email,
        ip_addr=ip_addr,
    )
    comment_post.thread.invalidate_cached_data()
    return new_revision
def user_edit_post(self,
                post=None,
                body_text=None,
                revision_comment=None,
                timestamp=None,
                by_email=False,
                is_private=False,
                suppress_email=False,
                ip_addr=None
            ):
    """a simple method that edits post body
    todo: unify it in the style of just a generic post
    this requires refactoring of underlying functions
    because we cannot bypass the permissions checks set within
    """
    post_type = post.post_type
    if post_type == 'question':
        return self.edit_question(
            question=post,
            body_text=body_text,
            timestamp=timestamp,
            revision_comment=revision_comment,
            by_email=by_email,
            is_private=is_private,
            suppress_email=suppress_email,
            ip_addr=ip_addr
        )
    if post_type == 'answer':
        return self.edit_answer(
            answer=post,
            body_text=body_text,
            timestamp=timestamp,
            revision_comment=revision_comment,
            by_email=by_email,
            suppress_email=suppress_email,
            ip_addr=ip_addr
        )
    if post_type == 'comment':
        return self.edit_comment(
            comment_post=post,
            body_text=body_text,
            by_email=by_email,
            suppress_email=suppress_email,
            ip_addr=ip_addr
        )
    if post_type == 'tag_wiki':
        return post.apply_edit(
            edited_at=timestamp,
            edited_by=self,
            text=body_text,
            #todo: summary name clash in question and question revision
            comment=revision_comment,
            wiki=True,
            by_email=False,
            ip_addr=ip_addr,
        )
    raise NotImplementedError()
@auto_now_timestamp
def user_edit_question(
    self,
    question=None,
    title=None,
    body_text=None,
    revision_comment=None,
    tags=None,
    wiki=False,
    edit_anonymously=False,
    is_private=False,
    timestamp=None,
    force=False,  #if True - bypass the assert
    by_email=False,
    suppress_email=False,
    ip_addr=None,
):
    """Apply an edit to ``question``, award badges, and return the
    new revision."""
    if force == False:
        self.assert_can_edit_question(question)
    question_revision = question.apply_edit(
        edited_at=timestamp,
        edited_by=self,
        title=title,
        text=body_text,
        #todo: summary name clash in question and question revision
        comment=revision_comment,
        tags=tags,
        wiki=wiki,
        edit_anonymously=edit_anonymously,
        is_private=is_private,
        by_email=by_email,
        suppress_email=suppress_email,
        ip_addr=ip_addr
    )
    question.thread.invalidate_cached_data()
    award_badges_signal.send(
        None,
        event='edit_question',
        actor=self,
        context_object=question,
        timestamp=timestamp
    )
    return question_revision
@auto_now_timestamp
def user_edit_answer(
    self,
    answer=None,
    body_text=None,
    revision_comment=None,
    wiki=False,
    is_private=False,
    timestamp=None,
    force=False,  #if True - bypass the assert
    by_email=False,
    suppress_email=False,
    ip_addr=None,
):
    """Apply an edit to ``answer``, award badges, and return the
    new revision."""
    if force == False:
        self.assert_can_edit_answer(answer)
    answer_revision = answer.apply_edit(
        edited_at=timestamp,
        edited_by=self,
        text=body_text,
        comment=revision_comment,
        wiki=wiki,
        is_private=is_private,
        by_email=by_email,
        suppress_email=suppress_email,
        ip_addr=ip_addr,
    )
    answer.thread.invalidate_cached_data()
    award_badges_signal.send(
        None,
        event='edit_answer',
        actor=self,
        context_object=answer,
        timestamp=timestamp
    )
    return answer_revision
@auto_now_timestamp
def user_create_post_reject_reason(
    self, title = None, details = None, timestamp = None
):
    """creates and returns the post reject reason"""
    reason = PostFlagReason(
        title = title,
        added_at = timestamp,
        author = self
    )
    #todo - need post_object.create_new() method
    #keep the raw details text; the name ``details`` is rebound to the
    #Post object below
    details_text = details
    details = Post(
        post_type = 'reject_reason',
        author = self,
        added_at = timestamp,
        text = details_text
    )
    details.parse_and_save(author=self)
    details.add_revision(
        author = self,
        revised_at = timestamp,
        #bug fix: previously the Post object itself was passed as the
        #revision text; the revision stores the raw text string
        text = details_text,
        comment = unicode(const.POST_STATUS['default_version'])
    )
    reason.details = details
    reason.save()
    return reason
@auto_now_timestamp
def user_edit_post_reject_reason(
    self, reason, title=None, details=None, timestamp=None
):
    """Update a reject reason's title and details; returns the new
    revision of the details post."""
    reason.title = title
    reason.save()
    return reason.details.apply_edit(
        edited_by=self,
        edited_at=timestamp,
        text=details
    )
def user_post_answer(
                    self,
                    question=None,
                    body_text=None,
                    follow=False,
                    wiki=False,
                    is_private=False,
                    timestamp=None,
                    by_email=False,
                    ip_addr=None,
                ):
    """Post an answer to ``question`` on behalf of the user.

    Enforces the waiting period and minimum reputation for answering
    one's own question, then delegates to
    ``Post.objects.create_new_answer``. Returns the new answer post.
    """
    #todo: move this to assertion - user_assert_can_post_answer
    if self == question.author and not self.is_administrator():
        # check date and rep required to post answer to own question
        delta = datetime.timedelta(askbot_settings.MIN_DAYS_TO_ANSWER_OWN_QUESTION)
        now = datetime.datetime.now()
        asked = question.added_at
        #todo: this is an assertion, must be moved out
        if (now - asked < delta and self.reputation < askbot_settings.MIN_REP_TO_ANSWER_OWN_QUESTION):
            #time remaining until the user may answer
            diff = asked + delta - now
            days = diff.days
            hours = int(diff.seconds/3600)
            #note: minutes is total seconds/60 and may exceed 60;
            #it is only displayed when < 60 thanks to the elif order below
            minutes = int(diff.seconds/60)
            if days > 2:
                if asked.year == now.year:
                    date_token = asked.strftime("%b %d")
                else:
                    date_token = asked.strftime("%b %d '%y")
                left = _('on %(date)s') % { 'date': date_token }
            elif days == 2:
                left = _('in two days')
            elif days == 1:
                left = _('tomorrow')
            elif minutes >= 60:
                left = ungettext('in %(hr)d hour','in %(hr)d hours',hours) % {'hr':hours}
            else:
                left = ungettext('in %(min)d min','in %(min)d mins',minutes) % {'min':minutes}
            day = ungettext('%(days)d day','%(days)d days',askbot_settings.MIN_DAYS_TO_ANSWER_OWN_QUESTION) % {'days':askbot_settings.MIN_DAYS_TO_ANSWER_OWN_QUESTION}
            error_message = _(
                'New users must wait %(days)s to %(answer_own_questions)s. '
                ' You can post an answer %(left)s'
            ) % {
                'days': day,
                'left': left,
                'answer_own_questions': askbot_settings.WORDS_ANSWER_OWN_QUESTIONS
            }
            assert(error_message is not None)
            raise django_exceptions.PermissionDenied(error_message)
    self.assert_can_post_answer(thread = question.thread)
    #note: this type check runs after question.author/question.thread
    #were already accessed above
    if getattr(question, 'post_type', '') != 'question':
        raise TypeError('question argument must be provided')
    if body_text is None:
        raise ValueError('Body text is required to post answer')
    if timestamp is None:
        timestamp = datetime.datetime.now()
    # answer = Answer.objects.create_new(
    #     thread = question.thread,
    #     author = self,
    #     text = body_text,
    #     added_at = timestamp,
    #     email_notify = follow,
    #     wiki = wiki
    # )
    answer_post = Post.objects.create_new_answer(
        thread=question.thread,
        author=self,
        text=body_text,
        added_at=timestamp,
        email_notify=follow,
        wiki=wiki,
        is_private=is_private,
        by_email=by_email,
        ip_addr=ip_addr,
    )
    #add to the answerer's group
    answer_post.add_to_groups([self.get_personal_group()])
    answer_post.thread.invalidate_cached_data()
    award_badges_signal.send(None,
        event = 'post_answer',
        actor = self,
        context_object = answer_post
    )
    return answer_post
def user_visit_question(self, question = None, timestamp = None):
    """create a QuestionView record
    on behalf of the user represented by the self object
    and mark it as taking place at timestamp time
    and remove pending on-screen notifications about anything in
    the post - question, answer or comments
    """
    if timestamp is None:
        timestamp = datetime.datetime.now()
    #bug fix: QuerySet.update() never raises DoesNotExist - it returns
    #the number of updated rows - so the old try/except never created
    #the missing record; test the update count instead
    updated_count = QuestionView.objects.filter(
        who=self, question=question
    ).update(
        when = timestamp
    )
    if updated_count == 0:
        QuestionView(
            who=self,
            question=question,
            when = timestamp
        ).save()
    #filter memo objects on response activities directed to the current user
    #that refer to the children of the currently
    #viewed question and clear them for the current user
    ACTIVITY_TYPES = const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY
    ACTIVITY_TYPES += (const.TYPE_ACTIVITY_MENTION,)
    audit_records = ActivityAuditStatus.objects.filter(
        user = self,
        status = ActivityAuditStatus.STATUS_NEW,
        activity__question = question
    )
    cleared_record_count = audit_records.filter(
        activity__activity_type__in = ACTIVITY_TYPES
    ).update(
        status=ActivityAuditStatus.STATUS_SEEN
    )
    if cleared_record_count > 0:
        self.update_response_counts()
    #finally, mark admin memo objects if applicable
    #the admin response counts are not denormalized b/c they are easy to obtain
    if self.is_moderator() or self.is_administrator():
        audit_records.filter(
            activity__activity_type = const.TYPE_ACTIVITY_MARK_OFFENSIVE
        ).update(
            status=ActivityAuditStatus.STATUS_SEEN
        )
def user_is_username_taken(cls, username):
    """True if ``username`` already belongs to some account;
    duplicate accounts also count as taken."""
    try:
        cls.objects.get(username=username)
    except cls.MultipleObjectsReturned:
        return True
    except cls.DoesNotExist:
        return False
    return True
def user_is_administrator(self):
    """True if the user is a forum site administrator.

    The admin must be both superuser and staff member; the latter is
    because staff membership is required to access the live settings.
    """
    if self.is_superuser:
        return self.is_staff
    return self.is_superuser
def user_remove_admin_status(self):
    """Drop superuser and staff flags (record is not saved here)."""
    self.is_superuser = False
    self.is_staff = False
def user_set_admin_status(self):
    """Grant superuser and staff flags (record is not saved here)."""
    self.is_superuser = True
    self.is_staff = True
def user_add_missing_askbot_subscriptions(self):
    """Create default email feed settings for every feed type the
    user does not have yet."""
    from askbot import forms#need to avoid circular dependency
    form = forms.EditUserEmailFeedsForm()
    need_feed_types = form.get_db_model_subscription_type_names()
    have_feed_types = EmailFeedSetting.objects.filter(
        subscriber=self
    ).values_list('feed_type', flat=True)
    for feed_type in set(need_feed_types) - set(have_feed_types):
        #the default frequency for each feed type comes from live settings
        schedule_key = 'DEFAULT_NOTIFICATION_DELIVERY_SCHEDULE_%s' % feed_type.upper()
        EmailFeedSetting(
            subscriber=self,
            feed_type=feed_type,
            frequency=getattr(askbot_settings, schedule_key)
        ).save()
def user_is_moderator(self):
    """True for users with moderator status who are not administrators."""
    if self.status != 'm':
        return False
    return not self.is_administrator()
def user_is_post_moderator(self, post):
    """True, if user and post have common groups
    with moderation privilege"""
    if not askbot_settings.GROUPS_ENABLED:
        return False
    group_ids = self.get_groups().values_list('id', flat=True)
    vip_matches = PostToGroup.objects.filter(
        post=post,
        group__id__in=group_ids,
        group__is_vip=True
    )
    return vip_matches.count() > 0
def user_is_administrator_or_moderator(self):
    """True if the user is either an administrator or a moderator."""
    if self.is_administrator():
        return True
    return self.is_moderator()
def user_is_suspended(self):
    """True if the account status is 'suspended'."""
    return 's' == self.status
def user_is_blocked(self):
    """True if the account status is 'blocked'."""
    return 'b' == self.status
def user_is_watched(self):
    """True if the account status is 'watched'."""
    return 'w' == self.status
def user_is_approved(self):
    """True if the account status is 'approved' (a regular user)."""
    return 'a' == self.status
def user_is_owner_of(self, obj):
    """True if user owns object
    False otherwise

    Only implemented for question posts so far.
    """
    if not (isinstance(obj, Post) and obj.post_type == 'question'):
        raise NotImplementedError()
    return self == obj.author
def get_name_of_anonymous_user():
    """Returns name of the anonymous user
    either comes from the live settings or the language
    translation

    very possible that this function does not belong here
    """
    return askbot_settings.NAME_OF_ANONYMOUS_USER or _('Anonymous')
def user_get_anonymous_name(self):
    """Convenience wrapper around ``get_name_of_anonymous_user``
    for template macros that accept a user as parameter."""
    return get_name_of_anonymous_user()
def user_set_status(self, new_status):
    """sets new status to user

    this method understands that administrator status is
    stored in the User.is_superuser field, but
    everything else in User.status field

    there is a slight aberration - administrator status
    can be removed, but not added yet

    if new status is applied to user, then the record is
    committed to the database
    """
    #status codes:
    #d - administrator
    #m - moderator
    #s - suspended
    #b - blocked
    #w - watched
    #a - approved (regular user)
    assert(new_status in ('d', 'm', 's', 'b', 'w', 'a'))
    if new_status == self.status:
        return
    #clear admin status if user was an administrator
    #because this function is not dealing with the site admins
    if new_status == 'd':
        #create a new admin
        self.set_admin_status()
    else:
        #This was the old method, kept in the else clause when changing
        #to admin, so if you change the status to another thing that
        #is not Administrator it will simply remove admin if the user have
        #that permission, it will mostly be false.
        if self.is_administrator():
            self.remove_admin_status()
    #when toggling between blocked and non-blocked status
    #we need to invalidate question page caches, b/c they contain
    #user's url, which must be hidden in the blocked state
    if 'b' in (new_status, self.status) and new_status != self.status:
        threads = Thread.objects.get_for_user(self)
        for thread in threads:
            thread.invalidate_cached_post_data()
    self.status = new_status
    self.save()
@auto_now_timestamp
def user_moderate_user_reputation(
    self,
    user=None,
    reputation_change=0,
    comment=None,
    timestamp=None
):
    """Add or subtract reputation of another user and record a Repute
    entry explaining the change. ``comment`` is required."""
    if reputation_change == 0:
        return
    if comment == None:
        raise ValueError('comment is required to moderate user reputation')
    new_rep = user.reputation + reputation_change
    if new_rep < 1:
        #reputation cannot drop below 1; clip the recorded change too
        #todo: magic number
        reputation_change = 1 - user.reputation
        new_rep = 1
    user.reputation = new_rep
    user.save()
    #reputation_type 10 records are not linked to a question; see the
    #history of the Repute model for why the FK used to be faked here
    #(a Django ORM quirk with select_related on nullable FKs)
    repute = Repute(
        user=user,
        comment=comment,
        reputed_at=timestamp,
        reputation_type=10,  #todo: fix magic number
        reputation=user.reputation
    )
    if reputation_change < 0:
        repute.negative = -1 * reputation_change
    else:
        repute.positive = reputation_change
    repute.save()
def user_get_status_display(self):
    """Return a localized human-readable name of the user's status."""
    if self.is_approved():
        return _('Registered User')
    if self.is_administrator():
        return _('Administrator')
    if self.is_moderator():
        return _('Moderator')
    if self.is_suspended():
        return _('Suspended User')
    if self.is_blocked():
        return _('Blocked User')
    if self.is_watched():
        return _('New User')
    raise ValueError('Unknown user status')
def user_can_moderate_user(self, other):
    """True if ``self`` may moderate ``other``.

    Admins can moderate anyone; moderators can moderate anyone who
    is neither a moderator nor an administrator.
    """
    if self.is_administrator():
        return True
    if not self.is_moderator():
        return False
    return not (other.is_moderator() or other.is_administrator())
def user_get_followed_question_alert_frequency(self):
    """Return the email frequency of the followed-questions feed,
    creating the feed setting on first access."""
    setting = EmailFeedSetting.objects.get_or_create(
        subscriber=self,
        feed_type='q_sel'
    )[0]
    return setting.frequency
def user_subscribe_for_followed_question_alerts(self):
    """turns on daily subscription for selected questions
    otherwise does nothing
    Returns ``True`` if the subscription was turned on and
    ``False`` otherwise
    """
    setting, _created = EmailFeedSetting.objects.get_or_create(
        subscriber=self,
        feed_type='q_sel'
    )
    if setting.frequency != 'n':
        return False
    setting.frequency = 'd'
    setting.save()
    return True
def user_get_tag_filtered_questions(self, questions = None):
    """Returns a query set of questions, tag filtered according
    to the user choices. Parameter ``questions`` can be either ``None``
    or a starting query set.
    """
    if questions is None:
        questions = Post.objects.get_questions()
    language_code = get_language()
    if self.email_tag_filter_strategy == const.EXCLUDE_IGNORED:
        #exclude both explicitly ignored tags and wildcard matches
        ignored_tags = Tag.objects.filter(
            user_selections__reason = 'bad',
            user_selections__user = self,
            language_code=language_code
        )
        wk = self.ignored_tags.strip().split()
        ignored_by_wildcards = Tag.objects.get_by_wildcards(wk)
        return questions.exclude(
            thread__tags__in = ignored_tags
        ).exclude(
            thread__tags__in = ignored_by_wildcards
        ).distinct()
    elif self.email_tag_filter_strategy == const.INCLUDE_INTERESTING:
        #the "subscribed" tag set takes precedence over the
        #"interesting" set when the subscribed tag selector is enabled
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED:
            reason = 'subscribed'
            wk = self.subscribed_tags.strip().split()
        else:
            reason = 'good'
            wk = self.interesting_tags.strip().split()
        selected_tags = Tag.objects.filter(
            user_selections__reason = reason,
            user_selections__user = self,
            language_code=language_code
        )
        selected_by_wildcards = Tag.objects.get_by_wildcards(wk)
        tag_filter = models.Q(thread__tags__in = list(selected_tags)) \
                    | models.Q(thread__tags__in = list(selected_by_wildcards))
        return questions.filter( tag_filter ).distinct()
    else:
        #no filtering strategy selected - return the set unchanged
        return questions
def get_messages(self):
    """Return the texts of all of the user's stored messages."""
    return [record.message for record in self.message_set.all()]
def delete_messages(self):
    """Remove all of the user's stored messages."""
    self.message_set.all().delete()
#todo: find where this is used and replace with get_absolute_url
def user_get_profile_url(self, profile_section=None):
    """Returns the URL for this User's profile."""
    url = reverse(
        'user_profile',
        kwargs={'id': self.id, 'slug': slugify(self.username)}
    )
    if profile_section:
        return url + "?sort=" + profile_section
    return url
def user_get_absolute_url(self):
    """Alias for ``get_profile_url`` (Django convention)."""
    return self.get_profile_url()
def user_get_primary_language(self):
    """Return the user's primary language code.

    NOTE(review): the branch logic looks inverted - when the site is
    multilingual this returns the site-wide LANGUAGE_CODE, and only
    a monolingual site consults the per-user ``languages`` field;
    confirm intent against callers before changing.
    """
    if getattr(django_settings, 'ASKBOT_MULTILINGUAL', False):
        return django_settings.LANGUAGE_CODE
    else:
        #assumes ``languages`` is a non-empty space-separated string
        return self.languages.split()[0]
def get_profile_link(self):
    """Return a safe HTML anchor linking to the user's profile."""
    link_html = u'<a href="%s">%s</a>' \
        % (self.get_profile_url(), escape(self.username))
    return mark_safe(link_html)
def user_get_groups(self, private=False):
    """Return a query set of groups the user belongs to."""
    #todo: maybe cache this query
    return Group.objects.get_for_user(self, private=private)
def user_get_personal_group(self):
    """Return the user's personal (single-member) group."""
    return Group.objects.get(name=format_personal_group_name(self))
def user_get_foreign_groups(self):
    """Return a query set of groups the user does NOT belong to."""
    #todo: maybe cache this query
    member_group_ids = self.get_groups().values_list('id', flat=True)
    return Group.objects.exclude(id__in=member_group_ids)
def user_get_primary_group(self):
    """a temporary function - returns either None or the
    first non-personal non-everyone group
    works only for one real private group per-person
    """
    if not askbot_settings.GROUPS_ENABLED:
        return None
    for group in self.get_groups(private=True):
        if not group.is_personal():
            return group
    return None
def user_can_make_group_private_posts(self):
    """simplest implementation: user belongs to at least one group"""
    return self.get_primary_group() is not None
def user_get_group_membership(self, group):
    """Return the user's membership object for ``group``,
    or ``None`` when the user is not a member."""
    try:
        return GroupMembership.objects.get(user=self, group=group)
    except GroupMembership.DoesNotExist:
        return None
def user_get_groups_membership_info(self, groups):
    """returns a defaultdict with values that are
    dictionaries with the following keys and values:
    * key: acceptance_level, value: 'closed', 'moderated', 'open'
    * key: membership_level, value: 'none', 'pending', 'full'
    ``groups`` is a group tag query set
    """
    memberships = GroupMembership.objects.filter(
        user__id=self.id,
        group__id__in=groups.values_list('id', flat=True)
    )
    info = collections.defaultdict(
        lambda: {'acceptance_level': 'closed', 'membership_level': 'none'}
    )
    for membership in memberships:
        info[membership.group_id]['membership_level'] = \
            membership.get_level_display()
    for group in groups:
        info[group.id]['acceptance_level'] = \
            group.get_openness_level_for_user(self)
    return info
def user_get_karma_summary(self):
    """returns a human readable sentence describing
    the status of the user's karma"""
    data = {'username': self.username, 'reputation': self.reputation}
    return _("%(username)s karma is %(reputation)s") % data
def user_get_badge_summary(self):
    """returns a human readable sentence about the number of
    badges of different levels earned by the user.
    It is assumed that user has some badges"""
    if self.gold + self.silver + self.bronze == 0:
        return ''
    badge_bits = list()
    if self.gold:
        badge_bits.append(
            ungettext(
                'one gold badge',
                '%(count)d gold badges',
                self.gold
            ) % {'count': self.gold}
        )
    if self.silver:
        badge_bits.append(
            ungettext(
                'one silver badge',
                '%(count)d silver badges',
                self.silver
            ) % {'count': self.silver}
        )
    if self.bronze:
        badge_bits.append(
            ungettext(
                'one bronze badge',
                '%(count)d bronze badges',
                self.bronze
            ) % {'count': self.bronze}
        )
    if len(badge_bits) > 1:
        #join all but the last bit with commas, then glue
        #the last one on with a localized "and"
        badge_str = _('%(item1)s and %(item2)s') % {
            'item1': ', '.join(badge_bits[:-1]),
            'item2': badge_bits[-1]
        }
    else:
        badge_str = badge_bits[0]
    return _("%(user)s has %(badges)s") % {'user': self.username, 'badges':badge_str}
#series of methods for user vote-type commands
#same call signature func(self, post, timestamp=None, cancel=False)
#note that none of these have business logic checks internally
#these functions are used by the askbot app and
#by the data importer jobs from say stackexchange, where internal rules
#may be different
#maybe if we do use business rule checks here - we should add
#some flag allowing to bypass them for things like the data importers
def toggle_favorite_question(
                        self, question,
                        timestamp = None,
                        cancel = False,
                        force = False#this parameter is not used yet
                    ):
    """cancel has no effect here, but is important for the SE loader
    it is hoped that toggle will work and data will be consistent
    but there is no guarantee, maybe it's better to be more strict
    about processing the "cancel" option
    another strange thing is that this function unlike others below
    returns a value

    returns False when the favorite mark was removed,
    True when it was added

    todo: the on-screen follow and email subscription is not
    fully merged yet - see use of FavoriteQuestion and follow/unfollow question
    btw, names of the objects/methods is quite misleading ATM
    """
    try:
        #this attempts to remove the on-screen follow
        fave = FavoriteQuestion.objects.get(thread=question.thread, user=self)
        fave.delete()
        result = False
        question.thread.update_favorite_count()
        #this removes email subscription
        if question.thread.is_followed_by(self):
            self.unfollow_question(question)
    except FavoriteQuestion.DoesNotExist:
        #no favorite record yet - create it
        if timestamp is None:
            timestamp = datetime.datetime.now()
        fave = FavoriteQuestion(
            thread = question.thread,
            user = self,
            added_at = timestamp,
        )
        fave.save()
        #this adds the email subscription
        if question.thread.is_followed_by(self) is False:
            self.follow_question(question)
        result = True
        question.thread.update_favorite_count()
        award_badges_signal.send(None,
            event = 'select_favorite_question',
            actor = self,
            context_object = question,
            timestamp = timestamp
        )
    return result
#maps (vote direction, post type) pairs to badge-award event names
VOTES_TO_EVENTS = {
    (Vote.VOTE_UP, 'answer'): 'upvote_answer',
    (Vote.VOTE_UP, 'question'): 'upvote_question',
    (Vote.VOTE_DOWN, 'question'): 'downvote',
    (Vote.VOTE_DOWN, 'answer'): 'downvote',
    (Vote.VOTE_UP, 'comment'): 'upvote_comment',
}
@auto_now_timestamp
def _process_vote(user, post, timestamp=None, cancel=False, vote_type=None):
    """"private" wrapper function that applies post upvotes/downvotes
    and cancelations

    returns the Vote object when a vote was applied,
    None when the vote was canceled or nothing was done
    """
    #get or create the vote object
    #return with noop in some situations
    try:
        vote = Vote.objects.get(user = user, voted_post=post)
    except Vote.DoesNotExist:
        vote = None
    if cancel:
        if vote is None:#fix: identity test for None (was == None)
            return
        elif vote.is_opposite(vote_type):
            return
        else:
            #we would call vote.delete() here
            #but for now all that is handled by the
            #legacy askbot.auth functions
            #vote.delete()
            pass
    else:
        if vote is None:#fix: identity test for None (was == None)
            vote = Vote(
                    user = user,
                    voted_post=post,
                    vote = vote_type,
                    voted_at = timestamp,
                )
        elif vote.is_opposite(vote_type):
            #flip an existing opposite vote
            vote.vote = vote_type
        else:
            #same vote already exists - noop
            return
    #do the actual work
    if vote_type == Vote.VOTE_UP:
        if cancel:
            auth.onUpVotedCanceled(vote, post, user, timestamp)
        else:
            auth.onUpVoted(vote, post, user, timestamp)
    elif vote_type == Vote.VOTE_DOWN:
        if cancel:
            auth.onDownVotedCanceled(vote, post, user, timestamp)
        else:
            auth.onDownVoted(vote, post, user, timestamp)
    post.thread.invalidate_cached_data()
    if post.post_type == 'question':
        #denormalize the question post score on the thread
        post.thread.points = post.points
        post.thread.save()
        post.thread.update_summary_html()
    if cancel:
        return None
    event = VOTES_TO_EVENTS.get((vote_type, post.post_type), None)
    if event:
        award_badges_signal.send(None,
            event = event,
            actor = user,
            context_object = post,
            timestamp = timestamp
        )
    return vote
def user_fix_html_links(self, text):
    """depending on the user's privilege, either keep links and
    hotlinked images intact or replace them with their plain
    text urls
    """
    min_rep = askbot_settings.MIN_REP_TO_INSERT_LINK
    is_privileged = self.is_administrator_or_moderator() \
                        or self.reputation >= min_rep
    if is_privileged:
        return text
    stripped_text = replace_links_with_text(text)
    if stripped_text != text:
        #tell the user why the links were removed
        message = ungettext(
            'At least %d karma point is required to post links',
            'At least %d karma points is required to post links',
            min_rep
        ) % min_rep
        self.message_set.create(message=message)
    return stripped_text
def user_unfollow_question(self, question = None):
    """unsubscribes the user from the question's thread
    (removes the email subscription)"""
    self.followed_threads.remove(question.thread)
def user_follow_question(self, question = None):
    """subscribes the user to the question's thread
    (adds the email subscription)"""
    self.followed_threads.add(question.thread)
def user_is_following_question(user, question):
    """True if user is following a question"""
    return question.thread.followed_by.filter(id=user.id).exists()
def upvote(self, post, timestamp=None, cancel=False, force=False):
    """upvotes the post, or cancels an existing upvote when
    ``cancel`` is True; returns the Vote object or None
    (see :func:`_process_vote`)"""
    #force parameter not used yet
    return _process_vote(
        self,
        post,
        timestamp=timestamp,
        cancel=cancel,
        vote_type=Vote.VOTE_UP
    )
def downvote(self, post, timestamp=None, cancel=False, force=False):
    """downvotes the post, or cancels an existing downvote when
    ``cancel`` is True; returns the Vote object or None
    (see :func:`_process_vote`)"""
    #force not used yet
    return _process_vote(
        self,
        post,
        timestamp=timestamp,
        cancel=cancel,
        vote_type=Vote.VOTE_DOWN
    )
@auto_now_timestamp
def user_approve_post_revision(user, post_revision, timestamp = None):
    """approves the post revision and, if necessary,
    the parent post and threads"""
    user.assert_can_approve_post_revision()
    post_revision.approved = True
    post_revision.approved_by = user
    post_revision.approved_at = timestamp
    post = post_revision.post
    #approval of unpublished revision
    if post_revision.revision == 0:
        #assign the next revision number to the unpublished revision
        post_revision.revision = post.get_latest_revision_number() + 1
    post_revision.save()
    if post.approved == False:
        #first-time approval: bump the denormalized counters
        if post.is_comment():
            post.parent.comment_count += 1
            post.parent.save()
        elif post.is_answer():
            post.thread.answer_count += 1
            post.thread.save()
    post.approved = True
    post.text = post_revision.text
    post_is_new = (post.revisions.count() == 1)
    parse_results = post.parse_and_save(author=post_revision.author)
    #notify subscribers about the updated post
    signals.post_updated.send(
        post=post,
        updated_by=post_revision.author,
        newly_mentioned_users=parse_results['newly_mentioned_users'],
        #suppress_email=suppress_email,
        timestamp=timestamp,
        created=post_is_new,
        diff=parse_results['diff'],
        sender=post.__class__
    )
    if post_revision.post.post_type == 'question':
        #approving a question also approves its thread
        thread = post.thread
        thread.approved = True
        thread.save()
    post.thread.invalidate_cached_data()
    #send the signal of published revision
    signals.post_revision_published.send(
        None,
        revision=post_revision,
        was_approved=True
    )
@auto_now_timestamp
def flag_post(
        user, post, timestamp=None, cancel=False, cancel_all=False, force=False
    ):
    """flags a post as offensive, or removes offensive flag(s).

    * default: add an offensive flag by ``user`` and emit the
      'flag_post' badge event
    * ``cancel``: remove the flag previously set by ``user``
    * ``cancel_all``: remove all offensive flags from the post
    * ``force``: skip the permission assertions
    """
    if cancel_all:
        # remove all flags
        if not force:#idiomatic boolean test (was force == False)
            user.assert_can_remove_all_flags_offensive(post=post)
        post_content_type = ContentType.objects.get_for_model(post)
        all_flags = Activity.objects.filter(
                        activity_type=const.TYPE_ACTIVITY_MARK_OFFENSIVE,
                        content_type=post_content_type,
                        object_id=post.id
                    )
        for flag in all_flags:
            auth.onUnFlaggedItem(post, flag.user, timestamp=timestamp)
    elif cancel:#todo: can't unflag?
        if not force:
            user.assert_can_remove_flag_offensive(post = post)
        auth.onUnFlaggedItem(post, user, timestamp=timestamp)
    else:
        if not force:
            user.assert_can_flag_offensive(post=post)
        auth.onFlaggedItem(post, user, timestamp=timestamp)
        award_badges_signal.send(None,
            event = 'flag_post',
            actor = user,
            context_object = post,
            timestamp = timestamp
        )
def user_get_flags(self):
    """return flag Activity query set
    for all flags set by the user"""
    return Activity.objects.filter(
                        user = self,
                        activity_type = const.TYPE_ACTIVITY_MARK_OFFENSIVE
                    )
def user_get_flag_count_posted_today(self):
    """return the number of offensive flags the user
    has posted during the current calendar day"""
    today = datetime.date.today()
    tomorrow = today + datetime.timedelta(1)
    todays_flags = self.get_flags().filter(active_at__range=(today, tomorrow))
    return todays_flags.count()
def user_get_flags_for_post(self, post):
    """return query set of flag Activity items
    set by this user on the given post object
    """
    content_type = ContentType.objects.get_for_model(post)
    return self.get_flags().filter(
                        content_type=content_type,
                        object_id=post.id
                    )
def user_update_response_counts(user):
    """Recount number of responses to the user.
    """
    activity_types = const.RESPONSE_ACTIVITY_TYPES_FOR_DISPLAY
    activity_types += (const.TYPE_ACTIVITY_MENTION,)
    #base query set of audit records for the relevant activity types
    audits = ActivityAuditStatus.objects.filter(
                        user = user,
                        activity__activity_type__in = activity_types
                    )
    user.new_response_count = audits.filter(
                        status = ActivityAuditStatus.STATUS_NEW
                    ).count()
    user.seen_response_count = audits.filter(
                        status = ActivityAuditStatus.STATUS_SEEN
                    ).count()
    user.save()
def user_receive_reputation(self, num_points):
    """adds ``num_points`` (possibly negative) to the user's
    reputation; the reputation never drops below
    ``const.MIN_REPUTATION``; emits the ``reputation_received``
    signal with the pre-change value
    """
    old_points = self.reputation
    updated_points = old_points + num_points
    if updated_points <= 0:
        updated_points = const.MIN_REPUTATION
    self.reputation = updated_points
    signals.reputation_received.send(None, user=self, reputation_before=old_points)
def user_update_wildcard_tag_selections(
                                    self,
                                    action = None,
                                    reason = None,
                                    wildcards = None,
                                ):
    """updates the user selection of wildcard tags
    and saves the user object to the database

    ``action`` is 'add' or 'remove',
    ``reason`` is 'good', 'bad' or (when the subscribed tag
    selector is enabled) 'subscribed',
    ``wildcards`` is an iterable of wildcard tag patterns;
    returns the set of the given wildcards
    """
    if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED:
        assert reason in ('good', 'bad', 'subscribed')
    else:
        assert reason in ('good', 'bad')
    new_tags = set(wildcards)
    #the three selections are stored as space-separated strings
    interesting = set(self.interesting_tags.split())
    ignored = set(self.ignored_tags.split())
    subscribed = set(self.subscribed_tags.split())
    if reason == 'good':
        target_set = interesting
        other_set = ignored
    elif reason == 'bad':
        target_set = ignored
        other_set = interesting
    elif reason == 'subscribed':
        #subscribed tags have no "opposite" set
        target_set = subscribed
        other_set = None
    else:
        assert(action == 'remove')
    if action == 'add':
        target_set.update(new_tags)
        if reason in ('good', 'bad'):
            #a tag cannot be in both sets at once
            other_set.difference_update(new_tags)
    else:
        target_set.difference_update(new_tags)
        if reason in ('good', 'bad'):
            other_set.difference_update(new_tags)
    self.interesting_tags = ' '.join(interesting)
    self.ignored_tags = ' '.join(ignored)
    self.subscribed_tags = ' '.join(subscribed)
    self.save()
    return new_tags
def user_edit_group_membership(self, user=None, group=None,
                                action=None, force=False,
                                level=None
                            ):
    """allows one user to add another to a group
    or remove user from group.

    If when adding, the group does not exist, it will be created
    the delete function is not symmetric, the group will remain
    even if it becomes empty

    returns instance of GroupMembership (if action is "add") or None
    """
    if action == 'add':
        #calculate new level
        openness = group.get_openness_level_for_user(user)
        #let people join these special groups, but not leave
        if not force:
            if group.name == askbot_settings.GLOBAL_GROUP_NAME:
                openness = 'open'
            elif group.name == format_personal_group_name(user):
                openness = 'open'
        if openness == 'open':
            level = level or GroupMembership.FULL
        elif openness == 'moderated':
            #moderated groups start the user at the pending level
            level = level or GroupMembership.PENDING
        elif openness == 'closed':
            raise django_exceptions.PermissionDenied()
        else:
            #fallback for any other openness value
            level = level or GroupMembership.FULL
        #NOTE(review): ``level`` is part of the get_or_create lookup,
        #so an existing membership at a different level is not reused -
        #verify that this is intended
        membership, created = GroupMembership.objects.get_or_create(
                        user=user, group=group, level=level
                    )
        return membership
    elif action == 'remove':
        GroupMembership.objects.get(user = user, group = group).delete()
        return None
    else:
        raise ValueError('invalid action')
def user_join_group(self, group, force=False, level=None):
    """adds this user to ``group`` - a shortcut for
    edit_group_membership(action='add');
    returns the GroupMembership instance"""
    return self.edit_group_membership(group=group, user=self,
                                        action='add', force=force,
                                        level=level)
def user_leave_group(self, group):
    """removes this user from ``group`` - a shortcut for
    edit_group_membership(action='remove')"""
    self.edit_group_membership(group=group, user=self, action='remove')
def user_is_group_member(self, group=None):
    """True if user is member of group,
    where group can be instance of Group
    or name of group as string
    """
    #NOTE(review): on python 2 a unicode group name would not match
    #the isinstance(group, str) test - verify what callers pass in
    membership_filter = {'user': self}
    if isinstance(group, str):
        membership_filter['group__name'] = group
    else:
        membership_filter['group'] = group
    return GroupMembership.objects.filter(**membership_filter).count() == 1
#attach the free-standing user_* functions above to the django
#User model as methods
User.add_to_class(
            'add_missing_askbot_subscriptions',
            user_add_missing_askbot_subscriptions
        )
User.add_to_class(
            'is_username_taken',
            classmethod(user_is_username_taken)
        )
User.add_to_class(
            'get_followed_question_alert_frequency',
            user_get_followed_question_alert_frequency
        )
User.add_to_class(
            'get_top_answers_paginator',
            user_get_top_answers_paginator
        )
User.add_to_class(
            'subscribe_for_followed_question_alerts',
            user_subscribe_for_followed_question_alerts
        )
User.add_to_class('get_absolute_url', user_get_absolute_url)
User.add_to_class('get_avatar_url', user_get_avatar_url)
User.add_to_class('get_default_avatar_url', user_get_default_avatar_url)
User.add_to_class('get_gravatar_url', user_get_gravatar_url)
User.add_to_class('get_or_create_fake_user', user_get_or_create_fake_user)
User.add_to_class('get_marked_tags', user_get_marked_tags)
User.add_to_class('get_marked_tag_names', user_get_marked_tag_names)
User.add_to_class('get_groups', user_get_groups)
User.add_to_class('get_foreign_groups', user_get_foreign_groups)
User.add_to_class('get_group_membership', user_get_group_membership)
User.add_to_class('get_personal_group', user_get_personal_group)
User.add_to_class('get_primary_group', user_get_primary_group)
User.add_to_class('get_notifications', user_get_notifications)
User.add_to_class('strip_email_signature', user_strip_email_signature)
User.add_to_class('get_groups_membership_info', user_get_groups_membership_info)
User.add_to_class('get_anonymous_name', user_get_anonymous_name)
User.add_to_class('get_social_sharing_mode', user_get_social_sharing_mode)
User.add_to_class('get_social_sharing_status', user_get_social_sharing_status)
User.add_to_class('update_avatar_type', user_update_avatar_type)
#posting and editing of content
User.add_to_class('post_question', user_post_question)
User.add_to_class('edit_question', user_edit_question)
User.add_to_class('retag_question', user_retag_question)
User.add_to_class('repost_comment_as_answer', user_repost_comment_as_answer)
User.add_to_class('post_answer', user_post_answer)
User.add_to_class('edit_answer', user_edit_answer)
User.add_to_class('edit_post', user_edit_post)
User.add_to_class(
    'post_anonymous_askbot_content',
    user_post_anonymous_askbot_content
)
User.add_to_class('post_comment', user_post_comment)
User.add_to_class('edit_comment', user_edit_comment)
User.add_to_class('create_post_reject_reason', user_create_post_reject_reason)
User.add_to_class('edit_post_reject_reason', user_edit_post_reject_reason)
User.add_to_class('delete_post', user_delete_post)
User.add_to_class('post_object_description', user_post_object_description)
User.add_to_class('visit_question', user_visit_question)
#voting and flagging
User.add_to_class('upvote', upvote)
User.add_to_class('downvote', downvote)
User.add_to_class('flag_post', flag_post)
User.add_to_class('receive_reputation', user_receive_reputation)
User.add_to_class('get_flags', user_get_flags)
User.add_to_class(
    'get_flag_count_posted_today',
    user_get_flag_count_posted_today
)
User.add_to_class('get_flags_for_post', user_get_flags_for_post)
User.add_to_class('get_profile_url', user_get_profile_url)
User.add_to_class('get_profile_link', get_profile_link)
User.add_to_class('get_tag_filtered_questions', user_get_tag_filtered_questions)
User.add_to_class('get_messages', get_messages)
User.add_to_class('delete_messages', delete_messages)
User.add_to_class('toggle_favorite_question', toggle_favorite_question)
User.add_to_class('fix_html_links', user_fix_html_links)
User.add_to_class('follow_question', user_follow_question)
User.add_to_class('unfollow_question', user_unfollow_question)
User.add_to_class('is_following_question', user_is_following_question)
User.add_to_class('mark_tags', user_mark_tags)
User.add_to_class('merge_duplicate_questions', user_merge_duplicate_questions)
User.add_to_class('update_response_counts', user_update_response_counts)
#permissions and status checks
User.add_to_class('can_create_tags', user_can_create_tags)
User.add_to_class('can_have_strong_url', user_can_have_strong_url)
User.add_to_class('can_post_by_email', user_can_post_by_email)
User.add_to_class('can_post_comment', user_can_post_comment)
User.add_to_class('can_make_group_private_posts', user_can_make_group_private_posts)
User.add_to_class('is_administrator', user_is_administrator)
User.add_to_class('is_administrator_or_moderator', user_is_administrator_or_moderator)
User.add_to_class('set_admin_status', user_set_admin_status)
User.add_to_class('edit_group_membership', user_edit_group_membership)
User.add_to_class('join_group', user_join_group)
User.add_to_class('leave_group', user_leave_group)
User.add_to_class('is_group_member', user_is_group_member)
User.add_to_class('remove_admin_status', user_remove_admin_status)
User.add_to_class('is_moderator', user_is_moderator)
User.add_to_class('is_post_moderator', user_is_post_moderator)
User.add_to_class('is_approved', user_is_approved)
User.add_to_class('is_watched', user_is_watched)
User.add_to_class('is_suspended', user_is_suspended)
User.add_to_class('is_blocked', user_is_blocked)
User.add_to_class('is_owner_of', user_is_owner_of)
User.add_to_class('has_interesting_wildcard_tags', user_has_interesting_wildcard_tags)
User.add_to_class('has_ignored_wildcard_tags', user_has_ignored_wildcard_tags)
User.add_to_class('can_moderate_user', user_can_moderate_user)
User.add_to_class('has_affinity_to_question', user_has_affinity_to_question)
User.add_to_class('has_badge', user_has_badge)
User.add_to_class('moderate_user_reputation', user_moderate_user_reputation)
User.add_to_class('set_status', user_set_status)
User.add_to_class('get_badge_summary', user_get_badge_summary)
User.add_to_class('get_primary_language', user_get_primary_language)
User.add_to_class('get_status_display', user_get_status_display)
User.add_to_class('get_old_vote_for_post', user_get_old_vote_for_post)
User.add_to_class('get_unused_votes_today', user_get_unused_votes_today)
User.add_to_class('delete_comment', user_delete_comment)
User.add_to_class('delete_question', user_delete_question)
User.add_to_class('delete_answer', user_delete_answer)
User.add_to_class(
    'delete_all_content_authored_by_user',
    user_delete_all_content_authored_by_user
)
User.add_to_class('restore_post', user_restore_post)
User.add_to_class('close_question', user_close_question)
User.add_to_class('reopen_question', user_reopen_question)
User.add_to_class('accept_best_answer', user_accept_best_answer)
User.add_to_class('unaccept_best_answer', user_unaccept_best_answer)
User.add_to_class(
    'update_wildcard_tag_selections',
    user_update_wildcard_tag_selections
)
User.add_to_class('approve_post_revision', user_approve_post_revision)
User.add_to_class('needs_moderation', user_needs_moderation)
User.add_to_class('notify_users', user_notify_users)
User.add_to_class('is_read_only', user_is_read_only)
#assertions
User.add_to_class('assert_can_vote_for_post', user_assert_can_vote_for_post)
User.add_to_class('assert_can_revoke_old_vote', user_assert_can_revoke_old_vote)
User.add_to_class('assert_can_upload_file', user_assert_can_upload_file)
User.add_to_class('assert_can_merge_questions', user_assert_can_merge_questions)
User.add_to_class('assert_can_post_question', user_assert_can_post_question)
User.add_to_class('assert_can_post_answer', user_assert_can_post_answer)
User.add_to_class('assert_can_post_comment', user_assert_can_post_comment)
User.add_to_class('assert_can_post_text', user_assert_can_post_text)
User.add_to_class('assert_can_edit_post', user_assert_can_edit_post)
User.add_to_class('assert_can_edit_deleted_post', user_assert_can_edit_deleted_post)
User.add_to_class('assert_can_see_deleted_post', user_assert_can_see_deleted_post)
User.add_to_class('assert_can_edit_question', user_assert_can_edit_question)
User.add_to_class('assert_can_edit_answer', user_assert_can_edit_answer)
User.add_to_class('assert_can_close_question', user_assert_can_close_question)
User.add_to_class('assert_can_reopen_question', user_assert_can_reopen_question)
User.add_to_class('assert_can_flag_offensive', user_assert_can_flag_offensive)
User.add_to_class('assert_can_remove_flag_offensive', user_assert_can_remove_flag_offensive)
User.add_to_class('assert_can_remove_all_flags_offensive', user_assert_can_remove_all_flags_offensive)
User.add_to_class('assert_can_retag_question', user_assert_can_retag_question)
#todo: do we need assert_can_delete_post
User.add_to_class('assert_can_delete_post', user_assert_can_delete_post)
User.add_to_class('assert_can_restore_post', user_assert_can_restore_post)
User.add_to_class('assert_can_delete_comment', user_assert_can_delete_comment)
User.add_to_class('assert_can_edit_comment', user_assert_can_edit_comment)
User.add_to_class('assert_can_convert_post', user_assert_can_convert_post)
User.add_to_class('assert_can_delete_answer', user_assert_can_delete_answer)
User.add_to_class('assert_can_delete_question', user_assert_can_delete_question)
User.add_to_class('assert_can_accept_best_answer', user_assert_can_accept_best_answer)
User.add_to_class(
    'assert_can_unaccept_best_answer',
    user_assert_can_unaccept_best_answer
)
User.add_to_class(
    'assert_can_approve_post_revision',
    user_assert_can_approve_post_revision
)
#todo: move this to askbot/mail ?
def format_instant_notification_email(
                                        to_user = None,
                                        from_user = None,
                                        post = None,
                                        reply_address = None,
                                        alt_reply_address = None,
                                        update_type = None,
                                        template = None,
                                    ):
    """
    returns text of the instant notification body
    and subject line

    that is built when post is updated
    only update_types in const.RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES
    are supported
    """
    origin_post = post.get_origin_post()
    #validate that the post type matches the update type
    if update_type == 'question_comment':
        assert(isinstance(post, Post) and post.is_comment())
        assert(post.parent and post.parent.is_question())
    elif update_type == 'answer_comment':
        assert(isinstance(post, Post) and post.is_comment())
        assert(post.parent and post.parent.is_answer())
    elif update_type == 'answer_update':
        assert(isinstance(post, Post) and post.is_answer())
    elif update_type == 'new_answer':
        assert(isinstance(post, Post) and post.is_answer())
    elif update_type == 'question_update':
        assert(isinstance(post, Post) and post.is_question())
    elif update_type == 'new_question':
        assert(isinstance(post, Post) and post.is_question())
    elif update_type == 'post_shared':
        pass
    else:
        raise ValueError('unexpected update_type %s' % update_type)
    if update_type.endswith('update'):
        assert('comment' not in update_type)
        #for edits, show a diff of the last two revisions
        revisions = post.revisions.all()[:2]
        assert(len(revisions) == 2)
        content_preview = htmldiff(
                sanitize_html(revisions[1].html),
                sanitize_html(revisions[0].html),
                ins_start = '<b><u style="background-color:#cfc">',
                ins_end = '</u></b>',
                del_start = '<del style="color:#600;background-color:#fcc">',
                del_end = '</del>'
            )
        #todo: remove hardcoded style
    else:
        #for new posts, show the post itself
        content_preview = post.format_for_email(is_leaf_post=True, recipient=to_user)
    #add indented summaries for the parent posts
    content_preview += post.format_for_email_as_parent_thread_summary(recipient=to_user)
    #content_preview += '<p>======= Full thread summary =======</p>'
    #content_preview += post.thread.format_for_email(recipient=to_user)
    #pick the phrase describing what the author did
    if update_type == 'post_shared':
        user_action = _('%(user)s shared a %(post_link)s.')
    elif post.is_comment():
        if update_type.endswith('update'):
            user_action = _('%(user)s edited a %(post_link)s.')
        else:
            user_action = _('%(user)s posted a %(post_link)s')
    elif post.is_answer():
        if update_type.endswith('update'):
            user_action = _('%(user)s edited an %(post_link)s.')
        else:
            user_action = _('%(user)s posted an %(post_link)s.')
    elif post.is_question():
        if update_type.endswith('update'):
            user_action = _('%(user)s edited a %(post_link)s.')
        else:
            user_action = _('%(user)s posted a %(post_link)s.')
    else:
        raise ValueError('unrecognized post type')
    post_url = site_url(post.get_absolute_url())
    user_url = site_url(from_user.get_absolute_url())
    if to_user.is_administrator_or_moderator() and askbot_settings.SHOW_ADMINS_PRIVATE_USER_DATA:
        #moderators also get the author's email address in the link
        user_link_fmt = '<a href="%(profile_url)s">%(username)s</a> (<a href="mailto:%(email)s">%(email)s</a>)'
        user_link = user_link_fmt % {
            'profile_url': user_url,
            'username': from_user.username,
            'email': from_user.email
        }
    elif post.is_anonymous:
        user_link = from_user.get_name_of_anonymous_user()
    else:
        user_link = '<a href="%s">%s</a>' % (user_url, from_user.username)
    user_action = user_action % {
        'user': user_link,
        'post_link': '<a href="%s">%s</a>' % (post_url, _(post.post_type))
    }
    can_reply = to_user.can_post_by_email()
    if can_reply:
        #build the "reply above this line" separator
        reply_separator = const.SIMPLE_REPLY_SEPARATOR_TEMPLATE % \
                    _('To reply, PLEASE WRITE ABOVE THIS LINE.')
        if post.post_type == 'question' and alt_reply_address:
            data = {
                'addr': alt_reply_address,
                'subject': urllib.quote(
                        ('Re: ' + post.thread.title).encode('utf-8')
                    )
            }
            reply_separator += '<p>' + \
                const.REPLY_WITH_COMMENT_TEMPLATE % data
            reply_separator += '</p>'
        else:
            reply_separator = '<p>%s</p>' % reply_separator
        reply_separator += user_action
    else:
        reply_separator = user_action
    user_subscriptions_url = reverse(
                                'user_subscriptions',
                                kwargs = {
                                    'id': to_user.id,
                                    'slug': slugify(to_user.username)
                                }
                            )
    #context passed to the notification template
    update_data = {
        'admin_email': askbot_settings.ADMIN_EMAIL,
        'recipient_user': to_user,
        'update_author_name': from_user.username,
        'receiving_user_name': to_user.username,
        'receiving_user_karma': to_user.reputation,
        'reply_by_email_karma_threshold': askbot_settings.MIN_REP_TO_POST_BY_EMAIL,
        'can_reply': can_reply,
        'content_preview': content_preview,
        'update_type': update_type,
        'post_url': post_url,
        'origin_post_title': origin_post.thread.title,
        'user_subscriptions_url': site_url(user_subscriptions_url),
        'reply_separator': reply_separator,
        'reply_address': reply_address,
        'is_multilingual': getattr(django_settings, 'ASKBOT_MULTILINGUAL', False)
    }
    subject_line = _('"%(title)s"') % {'title': origin_post.thread.title}
    content = template.render(Context(update_data))
    return subject_line, content
def get_reply_to_addresses(user, post):
    """Returns one or two email addresses that can be
    used by a given `user` to reply to the `post`
    the first address - always a real email address,
    the second address is not ``None`` only for "question" posts.

    When the user is notified of a new question -
    i.e. `post` is a "question", he/she
    will need to choose - whether to give an answer or a comment,
    thus we return the second address - for the comment reply.

    When the post is a "question", the first email address
    is for posting an "answer", and when post is either
    "comment" or "answer", the address will be for posting
    a "comment".
    """
    #these variables will contain return values
    primary_addr = django_settings.DEFAULT_FROM_EMAIL
    secondary_addr = None
    if user.can_post_by_email():
        if user.reputation >= askbot_settings.MIN_REP_TO_POST_BY_EMAIL:
            reply_args = {
                'post': post,
                'user': user,
                'reply_action': 'post_comment'
            }
            if post.post_type in ('answer', 'comment'):
                reply_args['reply_action'] = 'post_comment'
            elif post.post_type == 'question':
                reply_args['reply_action'] = 'post_answer'
            primary_addr = ReplyAddress.objects.create_new(
                                                    **reply_args
                                                ).as_email_address()
            #for questions also make an address that posts a comment
            if post.post_type == 'question':
                reply_args['reply_action'] = 'post_comment'
                secondary_addr = ReplyAddress.objects.create_new(
                                                        **reply_args
                                                    ).as_email_address()
    return primary_addr, secondary_addr
def notify_author_of_published_revision(revision=None, was_approved=False, **kwargs):
    """notifies author about approved post revision,
    assumes that we have the very first revision
    """
    #only email about first revision
    if revision.should_notify_author_about_publishing(was_approved):
        #local import - presumably to avoid a circular import at load time
        from askbot.tasks import notify_author_of_published_revision_celery_task
        notify_author_of_published_revision_celery_task.delay(revision)
#todo: move to utils
def calculate_gravatar_hash(instance, **kwargs):
    """Calculates a User's gravatar hash from their email address.

    Skips raw (e.g. fixture-loading) saves; stores the lowercase,
    whitespace-stripped md5 hex digest on ``instance.gravatar``.
    """
    if kwargs.get('raw', False):
        return
    clean_email = instance.email.strip().lower()
    #hash the utf-8 bytes explicitly: md5 operates on bytes and
    #the email field holds a unicode string
    instance.gravatar = hashlib.md5(clean_email.encode('utf-8')).hexdigest()
def record_post_update_activity(
        post,
        newly_mentioned_users=None,
        updated_by=None,
        suppress_email=False,
        timestamp=None,
        created=False,
        diff=None,
        **kwargs
    ):
    """called upon signal askbot.models.signals.post_updated
    which is sent at the end of save() method in posts

    this handler will set notifications about the post
    by delegating the work to a celery task
    """
    assert(timestamp is not None)#idiomatic None test (was != None)
    assert(updated_by is not None)
    if newly_mentioned_users is None:
        newly_mentioned_users = list()
    from askbot import tasks
    #pass only primitive values - the task may run in another process
    tasks.record_post_update_celery_task.delay(
        post_id=post.id,
        post_content_type_id=ContentType.objects.get_for_model(post).id,
        newly_mentioned_user_id_list=[u.id for u in newly_mentioned_users],
        updated_by_id=updated_by.id,
        suppress_email=suppress_email,
        timestamp=timestamp,
        created=created,
        diff=diff,
    )
def record_award_event(instance, created, **kwargs):
    """
    After we awarded a badge to user, we need to
    record this activity and notify user.
    We also recalculate awarded_count of this badge and user information.
    """
    if created:
        #todo: change this to community user who gives the award
        activity = Activity(
                        user=instance.user,
                        active_at=instance.awarded_at,
                        content_object=instance,
                        activity_type=const.TYPE_ACTIVITY_PRIZE
                    )
        activity.save()
        activity.add_recipients([instance.user])
        instance.badge.awarded_count += 1
        instance.badge.save()
        badge = get_badge(instance.badge.slug)
        #bump the per-level badge counter on the user profile
        if badge.level == const.GOLD_BADGE:
            instance.user.gold += 1
        if badge.level == const.SILVER_BADGE:
            instance.user.silver += 1
        if badge.level == const.BRONZE_BADGE:
            instance.user.bronze += 1
        instance.user.save()
def notify_award_message(instance, created, **kwargs):
    """
    Notify users when they have been awarded badges by using Django message.
    """
    #no notification when the badges are not publicly visible
    if askbot_settings.BADGES_MODE != 'public':
        return
    if created:
        user = instance.user
        badge = get_badge(instance.badge.slug)
        msg = _(u"Congratulations, you have received a badge '%(badge_name)s'. "
                u"Check out <a href=\"%(user_profile)s\">your profile</a>.") \
                % {
                    'badge_name':badge.name,
                    'user_profile':user.get_profile_url()
                }
        user.message_set.create(message=msg)
def record_answer_accepted(instance, created, **kwargs):
    """
    when answer is accepted, we record this for question author
    - who accepted it.
    """
    if instance.post_type != 'answer':
        return
    question = instance.thread._question_post()
    #only on update (not creation) of an accepted answer
    if not created and instance.accepted():
        activity = Activity(
                        user=question.author,
                        active_at=datetime.datetime.now(),
                        content_object=question,
                        activity_type=const.TYPE_ACTIVITY_MARK_ANSWER,
                        question=question
                    )
        activity.save()
        recipients = instance.get_author_list(
                                    exclude_list = [question.author]
                                )
        activity.add_recipients(recipients)
def record_user_visit(user, timestamp, **kwargs):
    """
    when user visits any pages, we update the last_seen and
    consecutive_days_visit_count
    """
    #fall back to now() when the user has no last_seen value yet
    prev_last_seen = user.last_seen or datetime.datetime.now()
    user.last_seen = timestamp
    consecutive_days = user.consecutive_days_visit_count
    #NOTE(review): the counter is only incremented on consecutive days
    #and is never reset after a longer gap - confirm that is intended
    if (user.last_seen.date() - prev_last_seen.date()).days == 1:
        user.consecutive_days_visit_count += 1
        consecutive_days = user.consecutive_days_visit_count
        award_badges_signal.send(None,
            event = 'site_visit',
            actor = user,
            context_object = user,
            timestamp = timestamp
        )
    #somehow it saves on the query as compared to user.save()
    update_data = {
        'last_seen': timestamp,
        'consecutive_days_visit_count': consecutive_days
    }
    User.objects.filter(id=user.id).update(**update_data)
def record_vote(instance, created, **kwargs):
    """Log an ``Activity`` entry when a new vote is cast.

    ``post_save`` handler for ``Vote``; only freshly created votes are
    recorded.  A vote value of 1 is an up-vote, anything else a down-vote.
    """
    if not created:
        return
    if instance.vote == 1:
        vote_type = const.TYPE_ACTIVITY_VOTE_UP
    else:
        vote_type = const.TYPE_ACTIVITY_VOTE_DOWN
    #todo: problem cannot access receiving user here
    Activity(
        user=instance.user,
        active_at=instance.voted_at,
        content_object=instance,
        activity_type=vote_type,
    ).save()
def record_cancel_vote(instance, **kwargs):
    """
    when user canceled vote, the vote will be deleted.

    ``post_delete`` handler for ``Vote`` (connected below).
    """
    activity = Activity(
        user=instance.user,
        active_at=datetime.datetime.now(),
        content_object=instance,
        activity_type=const.TYPE_ACTIVITY_CANCEL_VOTE
    )
    #todo: same problem - cannot access receiving user here
    activity.save()
#todo: weird that there is no record delete answer or comment
#is this even necessary to keep track of?
def record_delete_question(instance, delete_by, **kwargs):
    """Log the deletion of a question or answer post.

    Other post types (e.g. comments) are silently ignored -- see the
    module comment above about comment deletions not being tracked.
    """
    activity_type_by_post = {
        'question': const.TYPE_ACTIVITY_DELETE_QUESTION,
        'answer': const.TYPE_ACTIVITY_DELETE_ANSWER,
    }
    activity_type = activity_type_by_post.get(instance.post_type)
    if activity_type is None:
        return
    #no need to set receiving user here
    Activity(
        user=delete_by,
        active_at=datetime.datetime.now(),
        content_object=instance,
        activity_type=activity_type,
        question=instance.get_origin_post(),
    ).save()
def record_flag_offensive(instance, mark_by, **kwargs):
    """Log that ``mark_by`` flagged post ``instance`` as offensive and
    notify the moderators."""
    activity = Activity(
        user=mark_by,
        active_at=datetime.datetime.now(),
        content_object=instance,
        activity_type=const.TYPE_ACTIVITY_MARK_OFFENSIVE,
        question=instance.get_origin_post()
    )
    activity.save()
#   todo: report authors that their post is flagged offensive
#    recipients = instance.get_author_list(
#                                        exclude_list = [mark_by]
#                                    )
    activity.add_recipients(instance.get_moderators())
def remove_flag_offensive(instance, mark_by, **kwargs):
    "Remove flagging activity"
    # delete the activity row(s) created by record_flag_offensive for the
    # same user/post pair, looked up via the generic content-type fields
    content_type = ContentType.objects.get_for_model(instance)

    activity = Activity.objects.filter(
        user=mark_by,
        content_type = content_type,
        object_id = instance.id,
        activity_type=const.TYPE_ACTIVITY_MARK_OFFENSIVE,
        question=instance.get_origin_post()
    )
    activity.delete()
def record_update_tags(thread, tags, user, timestamp, **kwargs):
    """
    This function sends award badges signal on each updated tag
    so that badges responding to the 'update_tag' event can react,
    and records a single tag-update activity on the question.
    """
    for tag in tags:
        award_badges_signal.send(None,
            event = 'update_tag',
            actor = user,
            context_object = tag,
            timestamp = timestamp
        )

    question = thread._question_post()

    activity = Activity(
        user=user,
        active_at=datetime.datetime.now(),
        content_object=question,
        activity_type=const.TYPE_ACTIVITY_UPDATE_TAGS,
        question = question
    )
    activity.save()
def record_favorite_question(instance, created, **kwargs):
    """
    when user add the question in him favorite questions list.

    ``post_save`` handler for ``FavoriteQuestion``; notifies the
    question's authors (except the user who favorited it).
    """
    if created:
        activity = Activity(
            user=instance.user,
            active_at=datetime.datetime.now(),
            content_object=instance,
            activity_type=const.TYPE_ACTIVITY_FAVORITE,
            question=instance.thread._question_post()
        )
        activity.save()
        recipients = instance.thread._question_post().get_author_list(
            exclude_list = [instance.user]
        )
        activity.add_recipients(recipients)
def record_user_full_updated(instance, **kwargs):
    """Log that a user fully updated their profile.

    ``instance`` is the ``User`` -- it is both the actor and the
    content object of the activity.
    """
    activity = Activity(
        user=instance,
        active_at=datetime.datetime.now(),
        content_object=instance,
        activity_type=const.TYPE_ACTIVITY_USER_FULL_UPDATED
    )
    activity.save()
def send_respondable_email_validation_message(
    user = None, subject_line = None, data = None, template_name = None
):
    """sends email validation message to the user

    We validate email by getting user's reply
    to the validation message by email, which also gives
    an opportunity to extract user's email signature.

    Note: ``data`` (the template context dict) is mutated in place --
    the generated validation code is stored under 'email_code'.
    """
    reply_address = ReplyAddress.objects.create_new(
        user = user,
        reply_action = 'validate_email'
    )
    data['email_code'] = reply_address.address
    template = get_template(template_name)
    body_text = template.render(Context(data))#todo: set lang
    # the validation code is embedded in the Reply-To address, so a plain
    # email reply completes the validation
    reply_to_address = 'welcome-%s@%s' % (
        reply_address.address,
        askbot_settings.REPLY_BY_EMAIL_HOSTNAME
    )
    mail.send_mail(
        subject_line = subject_line,
        body_text = body_text,
        recipient_list = [user.email, ],
        activity_type = const.TYPE_ACTIVITY_VALIDATION_EMAIL_SENT,
        headers = {'Reply-To': reply_to_address}
    )
def add_user_to_global_group(sender, instance, created, **kwargs):
    """auto-joins user to the global group
    ``instance`` is an instance of ``User`` class

    ``post_save`` handler for ``User``; only runs on creation.
    """
    if created:
        instance.edit_group_membership(
            group=Group.objects.get_global_group(),
            user=instance,
            action='add'
        )
def add_user_to_personal_group(sender, instance, created, **kwargs):
    """auto-joins user to his/her personal group
    ``instance`` is an instance of ``User`` class
    """
    if created:
        #todo: groups will indeed need to be separated from tags
        #so that we can use less complicated naming scheme
        #in theore here we may have two users that will have
        #identical group names!!!
        group_name = format_personal_group_name(instance)
        # NOTE(review): stock Django ``get_or_create`` returns an
        # (object, created) tuple; this code treats the result as the
        # group itself -- presumably askbot's Group manager overrides
        # get_or_create to return just the group. Verify.
        group = Group.objects.get_or_create(
            name=group_name, user=instance
        )
        instance.edit_group_membership(
            group=group, user=instance, action='add'
        )
def greet_new_user(user, **kwargs):
    """sends welcome email to the newly created user

    todo: second branch should send email with a simple
    clickable link.
    """
    if askbot_settings.NEW_USER_GREETING:
        user.message_set.create(message = askbot_settings.NEW_USER_GREETING)

    # skip sending real email during test runs
    import sys
    if 'test' in sys.argv:
        return

    # template choice depends on whether reply-by-email is enabled; with it
    # on, the reply is also used to collect the user's email signature
    if askbot_settings.REPLY_BY_EMAIL:#with this on we also collect signature
        template_name = 'email/welcome_lamson_on.html'
    else:
        template_name = 'email/welcome_lamson_off.html'

    data = {
        'recipient_user': user,
        'site_name': askbot_settings.APP_SHORT_NAME,
        'site_url': site_url(reverse('questions')),
        'ask_address': 'ask@' + askbot_settings.REPLY_BY_EMAIL_HOSTNAME,
        'can_post_by_email': user.can_post_by_email()
    }
    send_respondable_email_validation_message(
        user=user,
        subject_line=_('Welcome to %(site_name)s') % data,
        data=data,
        template_name=template_name
    )
def complete_pending_tag_subscriptions(sender, request, *args, **kwargs):
    """save pending tag subscriptions saved in the session

    Runs on login; tag names stashed in the session before
    authentication are marked on the now-known user.
    """
    if 'subscribe_for_tags' in request.session:
        (pure_tag_names, wildcards) = request.session.pop('subscribe_for_tags')
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED:
            reason = 'subscribed'
        else:
            reason = 'good'
        request.user.mark_tags(
            pure_tag_names,
            wildcards,
            reason = reason,
            action = 'add'
        )
        request.user.message_set.create(
            message = _('Your tag subscription was saved, thanks!')
        )
def add_missing_subscriptions(sender, instance, created, **kwargs):
    """``sender`` is instance of ``User``. When the ``User``
    is created, any required email subscription settings will be
    added by this handler"""
    if created:
        instance.add_missing_askbot_subscriptions()
def add_missing_tag_subscriptions(sender, instance, created, **kwargs):
    '''``sender` is instance of `User``. When the user is created
    it add the tag subscriptions to the user via BulkTagSubscription
    and MarkedTags.
    '''
    if created:
        # only relevant when both subscribed-tag selection and groups
        # are enabled in the live settings
        if askbot_settings.SUBSCRIBED_TAG_SELECTOR_ENABLED and \
                askbot_settings.GROUPS_ENABLED:
            user_groups = instance.get_groups()
            for subscription in BulkTagSubscription.objects.filter(groups__in = user_groups):
                tag_list = subscription.tag_list()
                instance.mark_tags(tagnames = tag_list,
                                reason='subscribed', action='add')
def notify_punished_users(user, **kwargs):
    # login handler: if the account is blocked or suspended, the
    # permission check raises and the exception text is shown to the
    # user as a Django message (Python 2 except syntax).
    try:
        _assert_user_can(
            user=user,
            blocked_user_cannot=True,
            suspended_user_cannot=True
        )
    except django_exceptions.PermissionDenied, e:
        user.message_set.create(message = unicode(e))
def post_anonymous_askbot_content(
sender,
request,
user,
session_key,
signal,
*args,
**kwargs):
"""signal handler, unfortunately extra parameters
are necessary for the signal machinery, even though
they are not used in this function"""
if user.is_blocked() or user.is_suspended():
pass
else:
user.post_anonymous_askbot_content(session_key)
def set_user_avatar_type_flag(instance, created, **kwargs):
    #post_save handler for Avatar: refresh the owner's avatar-type flag
    instance.user.update_avatar_type()
def update_user_avatar_type_flag(instance, **kwargs):
    #post_delete handler for Avatar: refresh the owner's avatar-type flag
    instance.user.update_avatar_type()
def make_admin_if_first_user(user, **kwargs):
    """first user automatically becomes an administrator
    function is run when user registers

    ``user`` is the newly registered ``User``; status 'd' is the
    administrator status (see ``User.set_status``).

    NOTE(review): the previous docstring claimed this runs only once per
    interpreter session, but no such guard exists in this version.
    """
    # removed an unused local ``import sys`` left over from an older
    # implementation; the count is evaluated in the database
    user_count = User.objects.all().count()
    if user_count == 1:
        user.set_status('d')
def moderate_group_joining(sender, instance=None, created=False, **kwargs):
    """``post_save`` handler for ``GroupMembership``: when a membership is
    created in the PENDING state, ask the group's moderators to approve
    the join request."""
    if created and instance.level == GroupMembership.PENDING:
        user = instance.user
        group = instance.group
        user.notify_users(
                notification_type=const.TYPE_ACTIVITY_ASK_TO_JOIN_GROUP,
                recipients = group.get_moderators(),
                content_object = group
            )
#this variable and the signal handler below is
#needed to work around the issue in the django admin
#where auth_user table editing affects group memberships
#maps (user_id, group_id) -> membership level, saved across the
#clear/re-add cycle performed by the admin (see group_membership_changed)
GROUP_MEMBERSHIP_LEVELS = dict()
def group_membership_changed(**kwargs):
    """``m2m_changed`` handler for ``User.groups``.

    The django admin clears and re-adds group memberships when editing a
    user, which would destroy askbot's extra per-membership data.  This
    handler snapshots membership levels on ``pre_clear`` and restores
    them on ``post_add``.
    """
    sender = kwargs['sender']
    user = kwargs['instance']
    action = kwargs['action']
    reverse = kwargs['reverse']
    model = kwargs['model']
    pk_set = kwargs['pk_set']

    # editing from the Group side of the relation is not supported
    if reverse:
        raise NotImplementedError()

    #store group memberships info
    #and then delete group memberships
    if action == 'pre_clear':
        #get membership info, if exists, save
        memberships = GroupMembership.objects.filter(user=user)
        for gm in memberships:
            GROUP_MEMBERSHIP_LEVELS[(user.id, gm.group.id)] = gm.level
        memberships.delete()
    elif action == 'post_add':
        group_ids = pk_set
        for group_id in group_ids:
            gm_key = (user.id, group_id)
            #mend group membership if it does not exist
            if not GroupMembership.objects.filter(user=user, group__id=group_id).exists():
                try:
                    group = Group.objects.get(id=group_id)
                except Group.DoesNotExist:
                    #this is not an Askbot group, no group profile
                    #so we don't add anything here
                    pass
                else:
                    #restore group membership here
                    level = GROUP_MEMBERSHIP_LEVELS.get(gm_key)
                    GroupMembership.objects.create(user=user, group=group, level=level)

            # clean up the saved level whether or not it was used
            GROUP_MEMBERSHIP_LEVELS.pop(gm_key, None)
def tweet_new_post(sender, user=None, question=None, answer=None, form_data=None, **kwargs):
    """sends out tweets about the new post

    Exactly one of ``question``/``answer`` is expected to be set by the
    signal sender; tweeting happens asynchronously via a celery task.
    """
    from askbot.tasks import tweet_new_post_task
    post = question or answer
    tweet_new_post_task.delay(post.id)
def autoapprove_reputable_user(user=None, reputation_before=None, *args, **kwargs):
    """if user is 'watched' we change status to 'approved'
    if user's rep crossed the auto-approval margin"""
    margin = askbot_settings.MIN_REP_TO_AUTOAPPROVE_USER
    if user.is_watched():
        # chained comparison: old rep below margin, new rep at/above it
        if reputation_before < margin <= user.reputation:
            user.set_status('a')
def init_badge_data(sender, created_models=None, **kwargs):
    """``post_syncdb`` handler: seed the badge table right after the
    BadgeData model is first created."""
    if BadgeData in created_models:
        from askbot.models import badges
        badges.init_badges()
#hook up all signal handlers defined above
django_signals.post_syncdb.connect(init_badge_data)

#signal for User model save changes
django_signals.pre_save.connect(calculate_gravatar_hash, sender=User)
django_signals.post_save.connect(add_missing_subscriptions, sender=User)
django_signals.post_save.connect(add_user_to_global_group, sender=User)
django_signals.post_save.connect(add_user_to_personal_group, sender=User)
django_signals.post_save.connect(add_missing_tag_subscriptions, sender=User)
django_signals.post_save.connect(record_award_event, sender=Award)
django_signals.post_save.connect(notify_award_message, sender=Award)
django_signals.post_save.connect(record_answer_accepted, sender=Post)
django_signals.post_save.connect(record_vote, sender=Vote)
django_signals.post_save.connect(record_favorite_question, sender=FavoriteQuestion)
django_signals.post_save.connect(moderate_group_joining, sender=GroupMembership)
django_signals.m2m_changed.connect(group_membership_changed, sender=User.groups.through)

#avatar handlers are attached only when the optional 'avatar' app is installed
if 'avatar' in django_settings.INSTALLED_APPS:
    from avatar.models import Avatar
    django_signals.post_save.connect(set_user_avatar_type_flag, sender=Avatar)
    django_signals.post_delete.connect(update_user_avatar_type_flag, sender=Avatar)

django_signals.post_delete.connect(record_cancel_vote, sender=Vote)

#change this to real m2m_changed with Django1.2
#askbot's custom signals
signals.delete_question_or_answer.connect(record_delete_question, sender=Post)
signals.flag_offensive.connect(record_flag_offensive, sender=Post)
signals.remove_flag_offensive.connect(remove_flag_offensive, sender=Post)
signals.tags_updated.connect(record_update_tags)
signals.user_registered.connect(greet_new_user)
signals.user_registered.connect(make_admin_if_first_user)
signals.user_updated.connect(record_user_full_updated, sender=User)
signals.user_logged_in.connect(complete_pending_tag_subscriptions)#todo: add this to fake onlogin middleware
signals.user_logged_in.connect(notify_punished_users)
signals.user_logged_in.connect(post_anonymous_askbot_content)
signals.post_updated.connect(record_post_update_activity)
signals.new_answer_posted.connect(tweet_new_post)
signals.new_question_posted.connect(tweet_new_post)
signals.reputation_received.connect(autoapprove_reputable_user)
#probably we cannot use post-save here the point of this is
#to tell when the revision becomes publicly visible, not when it is saved
signals.post_revision_published.connect(notify_author_of_published_revision)
signals.site_visited.connect(record_user_visit)
#public API of this module: model classes and the signals sub-module
__all__ = [
    'signals',

    'Thread',

    'QuestionView',
    'FavoriteQuestion',
    'AnonymousQuestion',
    'DraftQuestion',

    'AnonymousAnswer',
    'DraftAnswer',

    'Post',
    'PostRevision',
    'PostToGroup',

    'Tag',
    'Vote',
    'PostFlagReason',
    'MarkedTag',
    'TagSynonym',

    'BadgeData',
    'Award',
    'Repute',

    'Activity',
    'ActivityAuditStatus',
    'EmailFeedSetting',
    'GroupMembership',
    'Group',

    'User',

    'ReplyAddress',

    'ImportRun',
    'ImportedObjectInfo',

    'get_model',
]
| gpl-3.0 |
stenvix/lpschedule | migrations/versions/84474ea117fe_.py | 2 | 3379 | """empty message
Revision ID: 84474ea117fe
Revises: None
Create Date: 2016-07-07 17:54:41.579400
"""
# revision identifiers, used by Alembic.
revision = '84474ea117fe'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema: group, institute, teacher, time,
    lesson and lesson_teacher tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('group',
    sa.Column('group_id', sa.Integer(), nullable=False),
    sa.Column('group_full_name', sa.String(), nullable=True),
    sa.Column('group_url', sa.String(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('group_id')
    )
    op.create_table('institute',
    sa.Column('institute_id', sa.Integer(), nullable=False),
    sa.Column('institute_abbr', sa.String(length=10, convert_unicode=True), nullable=True),
    sa.Column('institute_full_name', sa.String(convert_unicode=True), nullable=True),
    sa.PrimaryKeyConstraint('institute_id'),
    sa.UniqueConstraint('institute_abbr')
    )
    op.create_table('teacher',
    sa.Column('teacher_id', sa.Integer(), nullable=False),
    sa.Column('teacher_name', sa.Unicode(), nullable=True),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('teacher_id'),
    sa.UniqueConstraint('teacher_name')
    )
    op.create_table('time',
    sa.Column('time_id', sa.Integer(), nullable=False),
    sa.Column('time_number', sa.Integer(), nullable=True),
    sa.Column('time_start', sa.Time(), nullable=True),
    sa.Column('time_end', sa.Time(), nullable=True),
    sa.PrimaryKeyConstraint('time_id'),
    sa.UniqueConstraint('time_number')
    )
    # lesson references group, teacher and time created above
    op.create_table('lesson',
    sa.Column('lesson_id', sa.Integer(), nullable=False),
    sa.Column('lesson_name', sa.Unicode(), nullable=True),
    sa.Column('lesson_number', sa.Integer(), nullable=True),
    sa.Column('lesson_type', sa.Unicode(), nullable=True),
    sa.Column('lesson_week', sa.Integer(), nullable=True),
    sa.Column('subgroup', sa.Integer(), nullable=True),
    sa.Column('room', sa.Unicode(), nullable=True),
    sa.Column('semester_part', sa.Integer(), nullable=True),
    sa.Column('day_number', sa.Integer(), nullable=True),
    sa.Column('day_name', sa.Unicode(), nullable=True),
    sa.Column('teacher_id', sa.Integer(), nullable=True),
    sa.Column('group_id', sa.Integer(), nullable=True),
    sa.Column('time_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['group_id'], ['group.group_id'], ),
    sa.ForeignKeyConstraint(['teacher_id'], ['teacher.teacher_id'], ),
    sa.ForeignKeyConstraint(['time_id'], ['time.time_id'], ),
    sa.PrimaryKeyConstraint('lesson_id')
    )
    # association table between lesson and teacher (many-to-many)
    op.create_table('lesson_teacher',
    sa.Column('lessonteacher_id', sa.Integer(), nullable=False),
    sa.Column('teacher_id', sa.Integer(), nullable=True),
    sa.Column('lesson_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['lesson_id'], ['lesson.lesson_id'], ),
    sa.ForeignKeyConstraint(['teacher_id'], ['teacher.teacher_id'], ),
    sa.PrimaryKeyConstraint('lessonteacher_id')
    )
    ### end Alembic commands ###
def downgrade():
    """Drop all tables created by :func:`upgrade`, children first so
    foreign keys do not block the drops."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('lesson_teacher')
    op.drop_table('lesson')
    op.drop_table('time')
    op.drop_table('teacher')
    op.drop_table('institute')
    op.drop_table('group')
    ### end Alembic commands ###
| mit |
caLew/sugartest | src/jarabe/journal/detailview.py | 12 | 4034 | # Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon
from jarabe.journal.expandedentry import ExpandedEntry
from jarabe.journal import model
class DetailView(Gtk.VBox):
    """Journal detail page: a back bar on top of an expanded entry view.

    Emits 'go-back-clicked' when the user releases the back bar.
    """

    __gtype_name__ = 'DetailView'

    __gsignals__ = {
        'go-back-clicked': (GObject.SignalFlags.RUN_FIRST, None, ([])),
    }

    def __init__(self, journalactivity, **kwargs):
        self._journalactivity = journalactivity
        self._metadata = None
        # the ExpandedEntry widget is created lazily in _update_view
        self._expanded_entry = None

        Gtk.VBox.__init__(self)

        back_bar = BackBar()
        back_bar.connect('button-release-event',
                         self.__back_bar_release_event_cb)
        self.pack_start(back_bar, False, True, 0)

        self.show_all()

    def _fav_icon_activated_cb(self, fav_icon):
        # toggle the 'keep' (favorite) flag and mirror it on the icon
        keep = not self._expanded_entry.get_keep()
        self._expanded_entry.set_keep(keep)
        fav_icon.props.keep = keep

    def __back_bar_release_event_cb(self, back_bar, event):
        self.emit('go-back-clicked')
        return False

    def _update_view(self):
        # create the entry widget on first use, then (re)bind the metadata
        if self._expanded_entry is None:
            self._expanded_entry = ExpandedEntry(self._journalactivity)
            self.pack_start(self._expanded_entry, True, True, 0)
        self._expanded_entry.set_metadata(self._metadata)
        self.show_all()

    def refresh(self):
        """Re-read the entry's metadata from the datastore and redisplay."""
        logging.debug('DetailView.refresh')
        self._metadata = model.get(self._metadata['uid'])
        self._update_view()

    def get_metadata(self):
        return self._metadata

    def set_metadata(self, metadata):
        self._metadata = metadata
        self._update_view()

    # GObject property wrapping get_metadata/set_metadata
    metadata = GObject.property(
        type=object, getter=get_metadata, setter=set_metadata)
class BackBar(Gtk.EventBox):
    """Grey clickable bar with a left arrow and a 'Back' label.

    Highlights on hover; RTL locales get the child order reversed.
    """

    def __init__(self):
        Gtk.EventBox.__init__(self)
        self.modify_bg(Gtk.StateType.NORMAL,
                       style.COLOR_PANEL_GREY.get_gdk_color())

        hbox = Gtk.HBox(spacing=style.DEFAULT_PADDING)
        hbox.set_border_width(style.DEFAULT_PADDING)
        icon = Icon(icon_name='go-previous', pixel_size=style.SMALL_ICON_SIZE,
                    fill_color=style.COLOR_TOOLBAR_GREY.get_svg())
        hbox.pack_start(icon, False, False, 0)

        label = Gtk.Label()
        label.set_text(_('Back'))
        halign = Gtk.Alignment.new(0, 0.5, 0, 1)
        halign.add(label)
        hbox.pack_start(halign, True, True, 0)
        hbox.show()

        self.add(hbox)

        if Gtk.Widget.get_default_direction() == Gtk.TextDirection.RTL:
            # Reverse hbox children.
            for child in hbox.get_children():
                hbox.reorder_child(child, 0)

        self.connect('enter-notify-event', self.__enter_notify_event_cb)
        self.connect('leave-notify-event', self.__leave_notify_event_cb)

    def __enter_notify_event_cb(self, box, event):
        # hover highlight
        box.modify_bg(Gtk.StateType.NORMAL,
                      style.COLOR_SELECTION_GREY.get_gdk_color())
        return False

    def __leave_notify_event_cb(self, box, event):
        # restore the normal background
        box.modify_bg(Gtk.StateType.NORMAL,
                      style.COLOR_PANEL_GREY.get_gdk_color())
        return False
| gpl-2.0 |
DARKPOP/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/version_check.py | 70 | 1733 | # Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys

# webkitpy supports exactly Python 2.7.x.  Compare the version tuple
# instead of the version string: lexicographic string comparison would
# mis-order multi-digit components (e.g. '2.10' < '2.7').
if sys.version_info[:2] != (2, 7):
    sys.stderr.write("Unsupported Python version: webkitpy requires 2.7.x, and you're running %s.\n" % sys.version.split()[0])
    sys.exit(1)
| bsd-3-clause |
lanselin/pysal | pysal/esda/tests/test_moran.py | 5 | 7346 | import unittest
import pysal
from .. import moran
from ...common import pandas, RTOL, ATOL
import numpy as np
PANDAS_EXTINCT = pandas is None
class Moran_Tester(unittest.TestCase):
    """Global Moran's I on the St. Louis homicide data."""

    def setUp(self):
        self.w = pysal.open(pysal.examples.get_path("stl.gal")).read()
        f = pysal.open(pysal.examples.get_path("stl_hom.txt"))
        self.y = np.array(f.by_col['HR8893'])

    def test_moran(self):
        mi = moran.Moran(self.y, self.w, two_tailed=False)
        np.testing.assert_allclose(mi.I, 0.24365582621771659, rtol=RTOL, atol=ATOL)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(mi.p_norm, 0.00013573931385468807)

    def test_sids(self):
        w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
        f = pysal.open(pysal.examples.get_path("sids2.dbf"))
        SIDR = np.array(f.by_col("SIDR74"))
        mi = pysal.Moran(SIDR, w, two_tailed=False)
        np.testing.assert_allclose(mi.I, 0.24772519320480135, atol=ATOL, rtol=RTOL)
        self.assertAlmostEqual(mi.p_norm, 5.7916539074498452e-05)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        import pysal.contrib.pdio as pdio
        df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
        w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
        mi = moran.Moran.by_col(df, ['SIDR74'], w=w, two_tailed=False)
        sidr = np.unique(mi.SIDR74_moran.values)
        pval = np.unique(mi.SIDR74_p_sim.values)
        np.testing.assert_allclose(sidr, 0.24772519320480135, atol=ATOL, rtol=RTOL)
        self.assertAlmostEqual(pval, 0.001)
class Moran_Rate_Tester(unittest.TestCase):
    """Rate-adjusted Moran's I on the SIDS data."""

    def setUp(self):
        self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
        f = pysal.open(pysal.examples.get_path("sids2.dbf"))
        self.e = np.array(f.by_col['SID79'])
        self.b = np.array(f.by_col['BIR79'])

    def test_moran_rate(self):
        mi = moran.Moran_Rate(self.e, self.b, self.w, two_tailed=False)
        np.testing.assert_allclose(mi.I, 0.16622343552567395, rtol=RTOL, atol=ATOL)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(mi.p_norm, 0.004191499504892171)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        import pysal.contrib.pdio as pdio
        df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
        mi = moran.Moran_Rate.by_col(df, ['SID79'], ['BIR79'], w=self.w, two_tailed=False)
        sidr = np.unique(mi["SID79-BIR79_moran_rate"].values)
        pval = np.unique(mi["SID79-BIR79_p_sim"].values)
        np.testing.assert_allclose(sidr, 0.16622343552567395, rtol=RTOL, atol=ATOL)
        self.assertAlmostEqual(pval, 0.009)
class Moran_BV_matrix_Tester(unittest.TestCase):
    """Bivariate Moran matrix over four SIDS variables."""

    def setUp(self):
        f = pysal.open(pysal.examples.get_path("sids2.dbf"))
        varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
        self.names = varnames
        vars = [np.array(f.by_col[var]) for var in varnames]
        self.vars = vars
        self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()

    def test_Moran_BV_matrix(self):
        res = moran.Moran_BV_matrix(self.vars, self.w, varnames=self.names)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(res[(0, 1)].I, 0.19362610652874668)
        self.assertAlmostEqual(res[(3, 0)].I, 0.37701382542927858)
class Moran_Local_Tester(unittest.TestCase):
    """Local Moran statistics on the de Smith example data."""

    def setUp(self):
        np.random.seed(10)
        self.w = pysal.open(pysal.examples.get_path("desmith.gal")).read()
        f = pysal.open(pysal.examples.get_path("desmith.txt"))
        self.y = np.array(f.by_col['z'])

    def test_Moran_Local(self):
        lm = moran.Moran_Local(
            self.y, self.w, transformation="r", permutations=99)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(lm.z_sim[0], -0.68493799168603808)
        self.assertAlmostEqual(lm.p_z_sim[0], 0.24669152541631179)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        import pandas as pd
        df = pd.DataFrame(self.y, columns =['z'])
        lm = moran.Moran_Local.by_col(df, ['z'], w=self.w, transformation='r',
                                      permutations=99, outvals=['z_sim', 'p_z_sim'])
        self.assertAlmostEqual(lm.z_z_sim[0], -0.68493799168603808)
        self.assertAlmostEqual(lm.z_p_z_sim[0], 0.24669152541631179)
class Moran_Local_BV_Tester(unittest.TestCase):
    """Bivariate local Moran statistics on the SIDS data."""

    def setUp(self):
        np.random.seed(10)
        self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
        f = pysal.open(pysal.examples.get_path("sids2.dbf"))
        self.x = np.array(f.by_col['SIDR79'])
        self.y = np.array(f.by_col['SIDR74'])

    def test_Moran_Local_BV(self):
        lm = moran.Moran_Local_BV(self.x, self.y, self.w,
                                  transformation="r", permutations=99)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(lm.Is[0], 1.4649221250620736)
        self.assertAlmostEqual(lm.z_sim[0], 1.5816540860500772)
        self.assertAlmostEqual(lm.p_z_sim[0], 0.056864279811026153)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        import pysal.contrib.pdio as pdio
        df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
        np.random.seed(12345)
        moran.Moran_Local_BV.by_col(df, ['SIDR74', 'SIDR79'], w=self.w,
                                    inplace=True, outvals=['z_sim', 'p_z_sim'],
                                    transformation='r', permutations=99)
        bvstats = df['SIDR79-SIDR74_moran_local_bv'].values
        bvz = df['SIDR79-SIDR74_z_sim'].values
        bvzp = df['SIDR79-SIDR74_p_z_sim'].values
        self.assertAlmostEqual(bvstats[0], 1.4649221250620736)
        self.assertAlmostEqual(bvz[0], 1.657427, 5)
        self.assertAlmostEqual(bvzp[0], 0.048717, 5)
class Moran_Local_Rate_Tester(unittest.TestCase):
    """Rate-adjusted local Moran statistics on the SIDS data."""

    def setUp(self):
        np.random.seed(10)
        self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
        f = pysal.open(pysal.examples.get_path("sids2.dbf"))
        self.e = np.array(f.by_col['SID79'])
        self.b = np.array(f.by_col['BIR79'])

    def test_moran_rate(self):
        lm = moran.Moran_Local_Rate(self.e, self.b, self.w,
                                    transformation="r", permutations=99)
        # assertAlmostEquals is a deprecated alias -> use assertAlmostEqual
        self.assertAlmostEqual(lm.z_sim[0], -0.13699844503985936, 7)
        self.assertAlmostEqual(lm.p_z_sim[0], 0.44551601210081715)

    @unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_by_col(self):
        import pysal.contrib.pdio as pdio
        df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
        lm = moran.Moran_Local_Rate.by_col(df, ['SID79'], ['BIR79'], w=self.w,
                                           outvals=['p_z_sim', 'z_sim'],
                                           transformation='r', permutations=99)
        self.assertAlmostEqual(lm['SID79-BIR79_z_sim'][0], -0.13699844503985936, 7)
        self.assertAlmostEqual(lm['SID79-BIR79_p_z_sim'][0], 0.44551601210081715)
#assemble all test cases into a suite so the module can be run directly
suite = unittest.TestSuite()
test_classes = [Moran_Tester, Moran_Rate_Tester,
                Moran_BV_matrix_Tester, Moran_Local_Tester,
                Moran_Local_BV_Tester, Moran_Local_Rate_Tester]
for i in test_classes:
    a = unittest.TestLoader().loadTestsFromTestCase(i)
    suite.addTest(a)

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
| bsd-3-clause |
anthonydillon/horizon | horizon/workflows/base.py | 16 | 33382 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import inspect
import logging
from django.core import urlresolvers
from django import forms
from django.forms.forms import NON_FIELD_ERRORS # noqa
from django import template
from django.template.defaultfilters import linebreaks # noqa
from django.template.defaultfilters import safe # noqa
from django.template.defaultfilters import slugify # noqa
from django.utils.encoding import force_text
from django.utils.importlib import import_module # noqa
from django.utils.translation import ugettext_lazy as _
import six
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
def __init__(self, workflow, *args, **kwargs):
super(WorkflowContext, self).__init__(*args, **kwargs)
self._workflow = workflow
def __setitem__(self, key, val):
super(WorkflowContext, self).__setitem__(key, val)
return self._workflow._trigger_handlers(key)
def __delitem__(self, key):
return self.__setitem__(key, None)
def set(self, key, val):
return self.__setitem__(key, val)
def unset(self, key):
return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
    """Metaclass for :class:`Action`.

    Extends Django's form metaclass to lift the options declared on an
    inner ``Meta`` class (name, slug, permissions, progress message and
    help text) onto the Action class itself, with sensible defaults.
    """

    def __new__(mcs, name, bases, attrs):
        # Pop Meta for later processing
        opts = attrs.pop("Meta", None)

        # Create our new class
        cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # Process options from Meta
        cls.name = getattr(opts, "name", name)
        cls.slug = getattr(opts, "slug", slugify(name))
        cls.permissions = getattr(opts, "permissions", ())
        cls.progress_message = getattr(opts,
                                       "progress_message",
                                       _("Processing..."))
        cls.help_text = getattr(opts, "help_text", "")
        cls.help_text_template = getattr(opts, "help_text_template", None)
        return cls
@six.python_2_unicode_compatible
@six.add_metaclass(ActionMetaclass)
class Action(forms.Form):
    """A single atomic, interactive unit of work within a workflow.

    Conceptually, in a "launch instance" workflow the actions would be
    things like "naming the instance", "selecting an image", and finally
    "launching the instance". Because actions are always interactive they
    provide form controls and therefore subclass Django's ``Form``, with
    some extra intelligence layered on top:

    * ``Actions`` know the permissions required to complete them.
    * ``Actions`` carry a meta-level notion of "help text" meant to give
      context to the action wherever it appears in a site or workflow.
    * ``Actions`` know how to process their inputs and produce outputs,
      much like :class:`~horizon.forms.SelfHandlingForm` does now.

    The inner ``Meta`` class of an ``Action`` may declare:

    .. attribute:: name

        Verbose name for the action. Defaults to the class name.

    .. attribute:: slug

        Semi-unique slug for the action. Defaults to the "slugified"
        class name.

    .. attribute:: permissions

        Permission names required to complete this action. Defaults to an
        empty sequence.

    .. attribute:: help_text

        Simple help text shown alongside the action's fields.

    .. attribute:: help_text_template

        Path to a template holding richer help text rendered alongside the
        action's fields; combine with
        :meth:`~horizon.workflows.Action.get_help_text` to display
        practically anything.
    """
    def __init__(self, request, context, *args, **kwargs):
        # Bind to the POST payload when handling a submission; otherwise
        # build an unbound form whose initial data comes from the workflow
        # context.
        if request.method == "POST":
            bind_args = (request.POST,)
        else:
            bind_args = ()
        super(Action, self).__init__(*bind_args, initial=context)
        if not hasattr(self, "handle"):
            raise AttributeError("The action %s must define a handle method."
                                 % self.__class__.__name__)
        self.request = request
        self._populate_choices(request, context)
        self.required_css_class = 'required'

    def __str__(self):
        return force_text(self.name)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def _populate_choices(self, request, context):
        # A field may define a populate_<field>_choices hook which builds
        # its choice list dynamically from the request and context.
        for field_name in self.fields:
            populator = getattr(self, "populate_%s_choices" % field_name, None)
            if callable(populator):
                self.fields[field_name].choices = populator(request, context)

    def get_help_text(self, extra_context=None):
        """Returns the rendered help text for this action."""
        if self.help_text_template:
            tmpl = template.loader.get_template(self.help_text_template)
            ctx = template.RequestContext(self.request, extra_context or {})
            text = tmpl.render(ctx)
        else:
            text = linebreaks(force_text(self.help_text))
        return safe(text)

    def add_action_error(self, message):
        """Adds a non-field error to this action based on API issues."""
        self.errors[NON_FIELD_ERRORS] = self.error_class([message])

    def handle(self, request, context):
        """Handles any requisite processing for this action.

        Should return either ``None`` or a dictionary of data to be passed
        to :meth:`~horizon.workflows.Step.contribute`. The default is a
        no-op which returns ``None``.
        """
        return None
class MembershipAction(Action):
    """An action allowing a user to add/remove members from a group.

    Extends :class:`Action` with helper methods that produce the field
    names used by membership-management widgets.
    """
    def get_default_role_field_name(self):
        # Field holding the role assigned to newly added members.
        return "default_%s_role" % self.slug

    def get_member_field_name(self, role_id):
        # Field listing the members holding the given role.
        return "%s_role_%s" % (self.slug, role_id)
@six.python_2_unicode_compatible
class Step(object):
    """A step is a wrapper around an action which defines its context in a
    workflow. It knows about details such as:
    * The workflow's context data (data passed from step to step).
    * The data which must be present in the context to begin this step (the
      step's dependencies).
    * The keys which will be added to the context data upon completion of the
      step.
    * The connections between this step's fields and changes in the context
      data (e.g. if that piece of data changes, what needs to be updated in
      this step).
    A ``Step`` class has the following attributes:
    .. attribute:: action_class
        The :class:`~horizon.workflows.Action` class which this step wraps.
    .. attribute:: depends_on
        A list of context data keys which this step requires in order to
        begin interaction.
    .. attribute:: contributes
        A list of keys which this step will contribute to the workflow's
        context data. Optional keys should still be listed, even if their
        values may be set to ``None``.
    .. attribute:: connections
        A dictionary which maps context data key names to lists of callbacks.
        The callbacks may be functions, dotted python paths to functions
        which may be imported, or dotted strings beginning with ``"self"``
        to indicate methods on the current ``Step`` instance.
    .. attribute:: before
        Another ``Step`` class. This optional attribute is used to provide
        control over workflow ordering when steps are dynamically added to
        workflows. The workflow mechanism will attempt to place the current
        step before the step specified in the attribute.
    .. attribute:: after
        Another ``Step`` class. This attribute has the same purpose as
        :meth:`~horizon.workflows.Step.before` except that it will instead
        attempt to place the current step after the given step.
    .. attribute:: help_text
        A string of simple help text which will be prepended to the ``Action``
        class' help text if desired.
    .. attribute:: template_name
        A path to a template which will be used to render this step. In
        general the default common template should be used. Default:
        ``"horizon/common/_workflow_step.html"``.
    .. attribute:: has_errors
        A boolean value which indicates whether or not this step has any
        errors on the action within it or in the scope of the workflow. This
        attribute will only accurately reflect this status after validation
        has occurred.
    .. attribute:: slug
        Inherited from the ``Action`` class.
    .. attribute:: name
        Inherited from the ``Action`` class.
    .. attribute:: permissions
        Inherited from the ``Action`` class.
    """
    action_class = None
    depends_on = ()
    contributes = ()
    connections = None
    before = None
    after = None
    help_text = ""
    template_name = "horizon/common/_workflow_step.html"
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)
    def __str__(self):
        return force_text(self.name)
    def __init__(self, workflow):
        super(Step, self).__init__()
        self.workflow = workflow
        cls = self.__class__.__name__
        if not (self.action_class and issubclass(self.action_class, Action)):
            raise AttributeError("action_class not specified for %s." % cls)
        # Mirror the wrapped action's identifying attributes onto the step.
        self.slug = self.action_class.slug
        self.name = self.action_class.name
        self.permissions = self.action_class.permissions
        self.has_errors = False
        self._handlers = {}
        if self.connections is None:
            # We want a dict, but don't want to declare a mutable type on the
            # class directly.
            self.connections = {}
        # Gather our connection handlers and make sure they exist.
        # Handlers may be given as callables, "self.<attr path>" strings
        # resolved against this instance, or dotted module paths imported
        # at construction time. Anything else is rejected loudly.
        for key, handlers in self.connections.items():
            self._handlers[key] = []
            # TODO(gabriel): This is a poor substitute for broader handling
            if not isinstance(handlers, (list, tuple)):
                raise TypeError("The connection handlers for %s must be a "
                                "list or tuple." % cls)
            for possible_handler in handlers:
                if callable(possible_handler):
                    # If it's callable we know the function exists and is valid
                    self._handlers[key].append(possible_handler)
                    continue
                elif not isinstance(possible_handler, six.string_types):
                    raise TypeError("Connection handlers must be either "
                                    "callables or strings.")
                bits = possible_handler.split(".")
                if bits[0] == "self":
                    # Walk the attribute path on this instance.
                    root = self
                    for bit in bits[1:]:
                        try:
                            root = getattr(root, bit)
                        except AttributeError:
                            raise AttributeError("The connection handler %s "
                                                 "could not be found on %s."
                                                 % (possible_handler, cls))
                    handler = root
                elif len(bits) == 1:
                    # Import by name from local module not supported
                    raise ValueError("Importing a local function as a string "
                                     "is not supported for the connection "
                                     "handler %s on %s."
                                     % (possible_handler, cls))
                else:
                    # Try a general import
                    module_name = ".".join(bits[:-1])
                    try:
                        mod = import_module(module_name)
                        handler = getattr(mod, bits[-1])
                    except ImportError:
                        raise ImportError("Could not import %s from the "
                                          "module %s as a connection "
                                          "handler on %s."
                                          % (bits[-1], module_name, cls))
                    except AttributeError:
                        raise AttributeError("Could not import %s from the "
                                             "module %s as a connection "
                                             "handler on %s."
                                             % (bits[-1], module_name, cls))
                self._handlers[key].append(handler)
    @property
    def action(self):
        # Lazily instantiate (and cache) the wrapped action, letting
        # subclasses customize the context it receives first.
        if not getattr(self, "_action", None):
            try:
                # Hook in the action context customization.
                workflow_context = dict(self.workflow.context)
                context = self.prepare_action_context(self.workflow.request,
                                                      workflow_context)
                self._action = self.action_class(self.workflow.request,
                                                 context)
            except Exception:
                LOG.exception("Problem instantiating action class.")
                raise
        return self._action
    def prepare_action_context(self, request, context):
        """Allows for customization of how the workflow context is passed to
        the action; this is the reverse of what "contribute" does to make the
        action outputs sane for the workflow. Changes to the context are not
        saved globally here. They are localized to the action.
        Simply returns the unaltered context by default.
        """
        return context
    def get_id(self):
        """Returns the ID for this step. Suitable for use in HTML markup."""
        return "%s__%s" % (self.workflow.slug, self.slug)
    def _verify_contributions(self, context):
        """Check that every declared ``contributes`` key is in the context.

        Raises a ``WorkflowError`` listing any missing keys; returns True
        otherwise.
        """
        for key in self.contributes:
            # Make sure we don't skip steps based on weird behavior of
            # POST query dicts.
            field = self.action.fields.get(key, None)
            if field and field.required and not context.get(key):
                # Treat an empty value for a required field as "missing".
                context.pop(key, None)
        failed_to_contribute = set(self.contributes)
        failed_to_contribute -= set(context.keys())
        if failed_to_contribute:
            raise exceptions.WorkflowError("The following expected data was "
                                           "not added to the workflow context "
                                           "by the step %s: %s."
                                           % (self.__class__,
                                              failed_to_contribute))
        return True
    def contribute(self, data, context):
        """Adds the data listed in ``contributes`` to the workflow's shared
        context. By default, the context is simply updated with all the data
        returned by the action.
        Note that even if the value of one of the ``contributes`` keys is
        not present (e.g. optional) the key should still be added to the
        context with a value of ``None``.
        """
        if data:
            for key in self.contributes:
                context[key] = data.get(key, None)
        return context
    def render(self):
        """Renders the step."""
        step_template = template.loader.get_template(self.template_name)
        extra_context = {"form": self.action,
                         "step": self}
        context = template.RequestContext(self.workflow.request, extra_context)
        return step_template.render(context)
    def get_help_text(self):
        """Returns the help text for this step."""
        # Step-level help text is prepended to the action's own help text.
        text = linebreaks(force_text(self.help_text))
        text += self.action.get_help_text()
        return safe(text)
    def add_step_error(self, message):
        """Adds an error to the Step based on API issues."""
        self.action.add_action_error(message)
    def has_required_fields(self):
        """Returns True if action contains any required fields."""
        return any(field.required for field in self.action.fields.values())
class WorkflowMetaclass(type):
    """Metaclass for :class:`Workflow` that gives every workflow class its
    own, non-inherited ``_cls_registry`` set for dynamically registered
    step classes.
    """
    def __new__(mcs, name, bases, attrs):
        # Bug fix: the original called super().__new__() and threw the
        # resulting class away, then built the class a second time with
        # type.__new__(). Inject the registry first and create the class
        # exactly once.
        attrs["_cls_registry"] = set()
        return super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
    """A step for adding/removing members (and roles) of a group.

    .. attribute:: show_roles

       Set to False to disable the display of the roles dropdown.

    .. attribute:: available_list_title

       Title used for the "available" list column.

    .. attribute:: members_list_title

       Title used for the "members" list column.

    .. attribute:: no_available_text

       Placeholder shown when the available list is empty.

    .. attribute:: no_members_text

       Placeholder shown when the members list is empty.
    """
    template_name = "horizon/common/_workflow_step_update_members.html"
    show_roles = True
    available_list_title = _("All available")
    members_list_title = _("Members")
    no_available_text = _("None available.")
    no_members_text = _("No members.")

    def get_member_field_name(self, role_id):
        # Delegate to the wrapped action when it knows how to build the
        # field name; otherwise fall back to the slug-based convention.
        if issubclass(self.action_class, MembershipAction):
            return self.action.get_member_field_name(role_id)
        return self.slug + "_role_" + role_id
@six.python_2_unicode_compatible
@six.add_metaclass(WorkflowMetaclass)
class Workflow(html.HTMLElement):
    """A Workflow is a collection of Steps. Its interface is very
    straightforward, but it is responsible for handling some very
    important tasks such as:
    * Handling the injection, removal, and ordering of arbitrary steps.
    * Determining if the workflow can be completed by a given user at runtime
      based on all available information.
    * Dispatching connections between steps to ensure that when context data
      changes all the applicable callback functions are executed.
    * Verifying/validating the overall data integrity and subsequently
      triggering the final method to complete the workflow.
    The ``Workflow`` class has the following attributes:
    .. attribute:: name
        The verbose name for this workflow which will be displayed to the user.
        Defaults to the class name.
    .. attribute:: slug
        The unique slug for this workflow. Required.
    .. attribute:: steps
        Read-only access to the final ordered set of step instances for
        this workflow.
    .. attribute:: default_steps
        A list of :class:`~horizon.workflows.Step` classes which serve as the
        starting point for this workflow's ordered steps. Defaults to an empty
        list (``[]``).
    .. attribute:: finalize_button_name
        The name which will appear on the submit button for the workflow's
        form. Defaults to ``"Save"``.
    .. attribute:: success_message
        A string which will be displayed to the user upon successful completion
        of the workflow. Defaults to
        ``"{{ workflow.name }} completed successfully."``
    .. attribute:: failure_message
        A string which will be displayed to the user upon failure to complete
        the workflow. Defaults to ``"{{ workflow.name }} did not complete."``
    .. attribute:: depends_on
        A roll-up list of all the ``depends_on`` values compiled from the
        workflow's steps.
    .. attribute:: contributions
        A roll-up list of all the ``contributes`` values compiled from the
        workflow's steps.
    .. attribute:: template_name
        Path to the template which should be used to render this workflow.
        In general the default common template should be used. Default:
        ``"horizon/common/_workflow.html"``.
    .. attribute:: entry_point
        The slug of the step which should initially be active when the
        workflow is rendered. This can be passed in upon initialization of
        the workflow, or set anytime after initialization but before calling
        either ``get_entry_point`` or ``render``.
    .. attribute:: redirect_param_name
        The name of a parameter used for tracking the URL to redirect to upon
        completion of the workflow. Defaults to ``"next"``.
    .. attribute:: object
        The object (if any) which this workflow relates to. In the case of
        a workflow which creates a new resource the object would be the created
        resource after the relevant creation steps have been undertaken. In
        the case of a workflow which updates a resource it would be the
        resource being updated after it has been retrieved.
    .. attribute:: wizard
        Whether to present the workflow as a wizard, with "prev" and "next"
        buttons and validation after every step.
    .. attribute:: fullscreen
        If the workflow is presented in a modal, and this attribute is
        set to True, then the ``fullscreen`` css class will be added so
        the modal can take advantage of the available screen estate.
        Defaults to ``False``.
    """
    slug = None
    default_steps = ()
    template_name = "horizon/common/_workflow.html"
    finalize_button_name = _("Save")
    success_message = _("%s completed successfully.")
    failure_message = _("%s did not complete.")
    redirect_param_name = "next"
    multipart = False
    wizard = False
    fullscreen = False
    _registerable_class = Step
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)
    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(Workflow, self).__init__(*args, **kwargs)
        if self.slug is None:
            raise AttributeError("The workflow %s must have a slug."
                                 % self.__class__.__name__)
        self.name = getattr(self, "name", self.__class__.__name__)
        self.request = request
        self.depends_on = set([])
        self.contributions = set([])
        self.entry_point = entry_point
        self.object = None
        # Put together our steps in order. Note that we pre-register
        # non-default steps so that we can identify them and subsequently
        # insert them in order correctly.
        self._registry = dict([(step_class, step_class(self)) for step_class
                               in self.__class__._cls_registry
                               if step_class not in self.default_steps])
        self._gather_steps()
        # Determine all the context data we need to end up with.
        for step in self.steps:
            self.depends_on = self.depends_on | set(step.depends_on)
            self.contributions = self.contributions | set(step.contributes)
        # Initialize our context. For ease we can preseed it with a
        # regular dictionary. This should happen after steps have been
        # registered and ordered.
        self.context = WorkflowContext(self)
        context_seed = context_seed or {}
        # Only keep seed keys the steps actually consume or produce.
        clean_seed = dict([(key, val)
                           for key, val in context_seed.items()
                           if key in self.contributions | self.depends_on])
        self.context_seed = clean_seed
        self.context.update(clean_seed)
        if request and request.method == "POST":
            for step in self.steps:
                valid = step.action.is_valid()
                # Be sure to use the CLEANED data if the workflow is valid.
                if valid:
                    data = step.action.cleaned_data
                else:
                    data = request.POST
                self.context = step.contribute(data, self.context)
    @property
    def steps(self):
        if getattr(self, "_ordered_steps", None) is None:
            self._gather_steps()
        return self._ordered_steps
    def get_step(self, slug):
        """Returns the instantiated step matching the given slug."""
        for step in self.steps:
            if step.slug == slug:
                return step
    def _gather_steps(self):
        """Instantiates and orders the steps, filtering by permissions."""
        ordered_step_classes = self._order_steps()
        for default_step in self.default_steps:
            self.register(default_step)
            self._registry[default_step] = default_step(self)
        self._ordered_steps = [self._registry[step_class]
                               for step_class in ordered_step_classes
                               if has_permissions(self.request.user,
                                                  self._registry[step_class])]
    def _order_steps(self):
        """Returns the step classes sorted per the before/after constraints
        of dynamically registered steps.
        """
        steps = list(copy.copy(self.default_steps))
        additional = self._registry.keys()
        for step in additional:
            try:
                min_pos = steps.index(step.after)
            except ValueError:
                min_pos = 0
            try:
                max_pos = steps.index(step.before)
            except ValueError:
                max_pos = len(steps)
            if min_pos > max_pos:
                # Bug fix: the original interpolated ``additional`` (the
                # entire registry key collection) as %(new)s; report the
                # single offending step instead.
                raise exceptions.WorkflowError("The step %(new)s can't be "
                                               "placed between the steps "
                                               "%(after)s and %(before)s; the "
                                               "step %(before)s comes before "
                                               "%(after)s."
                                               % {"new": step,
                                                  "after": step.after,
                                                  "before": step.before})
            steps.insert(max_pos, step)
        return steps
    def get_entry_point(self):
        """Returns the slug of the step which the workflow should begin on.
        This method takes into account both already-available data and errors
        within the steps.
        """
        # If we have a valid specified entry point, use it.
        if self.entry_point:
            if self.get_step(self.entry_point):
                return self.entry_point
        # Otherwise fall back to calculating the appropriate entry point.
        for step in self.steps:
            if step.has_errors:
                return step.slug
            try:
                step._verify_contributions(self.context)
            except exceptions.WorkflowError:
                return step.slug
        # If nothing else, just return the first step.
        return self.steps[0].slug
    def _trigger_handlers(self, key):
        """Runs every step's connection handlers for ``key``; returns the
        list of (step slug, handler result) pairs.
        """
        responses = []
        handlers = [(step.slug, f) for step in self.steps
                    for f in step._handlers.get(key, [])]
        for slug, handler in handlers:
            responses.append((slug, handler(self.request, self.context)))
        return responses
    @classmethod
    def register(cls, step_class):
        """Registers a :class:`~horizon.workflows.Step` with the workflow."""
        if not inspect.isclass(step_class):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(step_class, cls._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % cls._registerable_class.__name__)
        if step_class in cls._cls_registry:
            return False
        else:
            cls._cls_registry.add(step_class)
            return True
    @classmethod
    def unregister(cls, step_class):
        """Unregisters a :class:`~horizon.workflows.Step` from the workflow.
        Returns ``True`` on success; raises ``NotRegistered`` if the step
        was never registered.
        """
        try:
            cls._cls_registry.remove(step_class)
        except KeyError:
            raise base.NotRegistered('%s is not registered' % cls)
        # Bug fix: the original returned cls._unregister(step_class), but no
        # such method exists on Workflow or its bases, so every successful
        # unregister raised AttributeError. Mirror register()'s boolean
        # return contract instead.
        return True
    def validate(self, context):
        """Hook for custom context data validation. Should return a boolean
        value or raise :class:`~horizon.exceptions.WorkflowValidationError`.
        """
        return True
    def is_valid(self):
        """Verifies that all required data is present in the context and
        calls the ``validate`` method to allow for finer-grained checks
        on the context data.
        """
        missing = self.depends_on - set(self.context.keys())
        if missing:
            raise exceptions.WorkflowValidationError(
                "Unable to complete the workflow. The values %s are "
                "required but not present." % ", ".join(missing))
        # Validate each step. Cycle through all of them to catch all errors
        # in one pass before returning.
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)
    def finalize(self):
        """Finalizes a workflow by running through all the actions in order
        and calling their ``handle`` methods. Returns ``True`` on full success,
        or ``False`` for a partial success, e.g. there were non-critical
        errors. (If it failed completely the function wouldn't return.)
        """
        partial = False
        for step in self.steps:
            try:
                data = step.action.handle(self.request, self.context)
                if data is True or data is None:
                    continue
                elif data is False:
                    partial = True
                else:
                    self.context = step.contribute(data or {}, self.context)
            except Exception:
                partial = True
                exceptions.handle(self.request)
        if not self.handle(self.request, self.context):
            partial = True
        return not partial
    def handle(self, request, context):
        """Handles any final processing for this workflow. Should return a
        boolean value indicating success.
        """
        return True
    def get_success_url(self):
        """Returns a URL to redirect the user to upon completion. By default it
        will attempt to parse a ``success_url`` attribute on the workflow,
        which can take the form of a reversible URL pattern name, or a
        standard HTTP URL.
        """
        try:
            return urlresolvers.reverse(self.success_url)
        except urlresolvers.NoReverseMatch:
            return self.success_url
    def format_status_message(self, message):
        """Hook to allow customization of the message returned to the user
        upon successful or unsuccessful completion of the workflow.
        By default it simply inserts the workflow's name into the message
        string.
        """
        if "%s" in message:
            return message % self.name
        else:
            return message
    def render(self):
        """Renders the workflow."""
        workflow_template = template.loader.get_template(self.template_name)
        extra_context = {"workflow": self}
        if self.request.is_ajax():
            extra_context['modal'] = True
        context = template.RequestContext(self.request, extra_context)
        return workflow_template.render(context)
    def get_absolute_url(self):
        """Returns the canonical URL for this workflow.
        This is used for the POST action attribute on the form element
        wrapping the workflow.
        For convenience it defaults to the value of
        ``request.get_full_path()`` with any query string stripped off,
        e.g. the path at which the workflow was requested.
        """
        return self.request.get_full_path().partition('?')[0]
    def add_error_to_step(self, message, slug):
        """Adds an error to the workflow's Step with the
        specified slug based on API issues. This is useful
        when you wish for API errors to appear as errors on
        the form rather than using the messages framework.
        """
        step = self.get_step(slug)
        if step:
            step.add_step_error(message)
| apache-2.0 |
ff0000/red-fab-deploy | fab_deploy/amazon/api.py | 1 | 8655 |
import os, sys
import time
from ConfigParser import ConfigParser
import boto
from boto import ec2
from boto.ec2 import elb
from boto.ec2.connection import EC2Connection
from boto.ec2.elb import HealthCheck
from fabric.api import env, execute, local
from fabric.tasks import Task
from fab_deploy import functions
from utils import get_security_group
# Fallback values used when neither the setup task nor the fab env
# overrides them.
DEFAULT_AMI = 'ami-5965401c' # ubuntu 12.04 x86_64
DEFAULT_INSTANCE_TYPE = 'm1.medium'
DEFAULT_REGION = 'us-west-1'
def get_ec2_connection(server_type, **kwargs):
"""
Create and return a valid connection to AWS.
To establish a valid connection, aws_access_key and aws_secret_key have to
be defined in a file specified by env.AWS_CREDENTIAL, with a format similar
to server.ini file. You should define env.AWS_CREDENTIAL in your fabfile.
By default, this function looks into $PROJECT_DIR/deploy/amazon.ini for the
credential information, and this file should has a section named 'amazon-aws'
and containing lines defining aws_access_key and aws_secret_key, like below
[amazon-aws]
aws_access_key =
aws_secret_key =
"""
amzn = env.get('AWS_CREDENTIAL',
os.path.join(env.deploy_path, 'amazon.ini'))
if not os.path.exists(amzn):
print ("Cannot find environment variable AMAZON_CREDENTIALS which should"
" point to a file with your aws_access_key and aws_secret_key info"
" inside. You may specify it through your fab env.")
sys.exit(1)
parser = ConfigParser()
parser.read(amzn)
aws_access_key = parser.get('amazon-aws', 'aws_access_key')
aws_secret_key = parser.get('amazon-aws', 'aws_secret_key')
if not aws_access_key or not aws_secret_key:
print "You must specify your amazon aws credentials to your env."
sys.exit(1)
region = kwargs.get('region', env.get('region'))
if not region:
region = DEFAULT_REGION
if server_type == 'ec2':
conn = ec2.connect_to_region(region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
return conn
elif server_type == 'elb':
conn = elb.connect_to_region(region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
return conn
class CreateKeyPair(Task):
    """
    Create an AWS key pair.
    This task should be run before you try to add any type of server, because
    task api.add_server will look for the key pair on your local machine.
    AWS requires a key pair to create EC2 instances, and the same key file is
    needed to login to the instances. This task creates a key pair, and
    save its content in a file located under the same directory as
    env.AWS_CREDENTIAL file. The key name and file location will be registered
    into the file specified by env.AWS_CREDENTIAL.
    You are responsible to keep the file in a secure place and never lose it.
    Make your own decision if you should push the key file to remote repo, or
    let git ignore it.
    """
    name = 'create_key'
    serial = True
    # Section of the credential file where the key name/path are recorded.
    section = 'amazon-aws'
    def run(self, **kwargs):
        conn = get_ec2_connection(server_type='ec2', **kwargs)
        sys.stdout.write("Please give a name to the key: ")
        amzn = env.get('AWS_CREDENTIAL',
                       os.path.join(env.deploy_path, 'amazon.ini'))
        # The .pem file is written next to the credential file.
        key_dir = os.path.dirname(amzn)
        # Prompt until we either find a usable existing key (name exists on
        # AWS *and* the .pem file is present locally) or create a new one.
        while True:
            key_name = raw_input()
            key_file = os.path.join(key_dir, key_name+'.pem')
            key = conn.get_key_pair(key_name)
            if key:
                if os.path.exists(key_file):
                    print ("Looks like key file %s already exists on your "
                           "machine. I will skip creating, and just use it."
                           %key_file)
                    break
                else:
                    # Key exists on AWS but its private half is missing
                    # locally; it cannot be recovered, so ask for a new name.
                    print ("Key '%s' already exist on AWS, but I couldn't "
                           "find it at %s. We need to create a new key, please"
                           "give a name to the key: " %(key.name, key_file))
                    continue
            else:
                key = conn.create_key_pair(key_name)
                key.save(key_dir)
                break
        # Record the chosen key name and file path in the credential file.
        parser = ConfigParser()
        parser.read(amzn)
        if not parser.has_section(self.section):
            parser.add_section(self.section)
        parser.set(self.section, 'ec2-key-name', key.name)
        parser.set(self.section, 'ec2-key-file', key_file)
        fp = open(amzn, 'w')
        parser.write(fp)
        fp.close()
        # Add the key to the local ssh agent so fabric can use it.
        local('ssh-add %s' %key_file)
class New(Task):
    """
    Provisions and set up a new amazon AWS EC2 instance
    This task reads in a number of variables defining the properties of EC2
    instance, and create it. Finally, if the instance is created successfully,
    this task will output its properties, and set up the instance as certain
    type of server by execute another task with the name of setup.***.
    You may provide the following parameters through command line.
    * **type**:  Required. server types, can be db_server, app_server,
                 dev_server, or slave_db
    * **region**: default is us-west-1
    * **ami_id**: AMI ID
    * **static_ip**: Set to true to use. By default this is not used.
    """
    name = 'add_server'
    serial = True
    def run(self, **kwargs):
        # This task creates the host we later connect to, so it must not be
        # run against pre-selected hosts.
        assert not env.hosts
        conn = get_ec2_connection(server_type='ec2', **kwargs)
        type = kwargs.get('type')
        setup_name = 'setup.%s' % type
        instance_type = DEFAULT_INSTANCE_TYPE
        ami_id = kwargs.get('ami_id')
        if not ami_id:
            ami_id = DEFAULT_AMI
        # The matching setup.<type> task may override instance type and AMI.
        task = functions.get_task_instance(setup_name)
        if task:
            if hasattr(task, 'instance_type'):
                instance_type = task.instance_type
            if hasattr(task, 'ami'):
                ami_id = task.ami
        else:
            print "I don't know how to add a %s server" % type
            sys.exit(1)
        # The key pair created by the create_key task is required to boot
        # and later log in to the instance.
        amzn = env.get('AWS_CREDENTIAL',
                       os.path.join(env.deploy_path, 'amazon.ini'))
        parser = ConfigParser()
        parser.read(amzn)
        key_name = parser.get('amazon-aws', 'ec2-key-name')
        key_file = parser.get('amazon-aws', 'ec2-key-file')
        if not key_name:
            print "Sorry. You need to create key pair with create_key first."
            sys.exit(1)
        elif not os.path.exists(key_file):
            print ("I find key %s in server.ini file, but the key file is not"
                   " on its location %s. There is something wrong. Please fix "
                   "it, or recreate key pair" % (key_name, key_file))
            sys.exit(1)
        image = conn.get_image(ami_id)
        security_group = get_security_group(conn, task.config_section)
        name = functions.get_remote_name(None, task.config_section,
                                         name=kwargs.get('name'))
        SERVER = {
            'image_id': image.id,
            'instance_type': instance_type,
            'security_groups': [security_group],
            'key_name': key_name,}
        reservation = conn.run_instances(**SERVER)
        print reservation
        instance = reservation.instances[0]
        # Poll until the instance leaves the 'pending' state.
        while instance.state != 'running':
            time.sleep(5)
            instance.update()
            print "...instance state: %s" % (instance.state)
        conn.create_tags([instance.id], {"Name": name})
        # Optionally allocate and attach an Elastic IP; otherwise use the
        # auto-assigned public address.
        if not kwargs.get('static_ip', False):
            ip = instance.ip_address
        else:
            elastic_ip = conn.allocate_address()
            print "...Elastic IP %s allocated" % elastic_ip
            elastic_ip.associate(instance.id)
            ip = elastic_ip.public_ip
        print "...EC2 instance is successfully created."
        print "...wait 5 seconds for the server to be ready"
        print "...while waiting, you may want to note down the following info"
        time.sleep(5)
        print "..."
        print "...Instance using image: %s" % image.name
        print "...Added into security group: %s" %security_group.name
        print "...Instance ID: %s" % instance.id
        print "...Public IP: %s" % ip
        # Hand off to the setup.<type> task on the new host.
        host_string = 'ubuntu@%s' % instance.public_dns_name
        execute(setup_name, name=name, hosts=[host_string])
# Task instances exported at module level for fabric's task discovery.
create_key = CreateKeyPair()
add_server = New()
| mit |
gunzy83/ansible-modules-extras | cloud/amazon/ec2_vpc_igw.py | 64 | 4572 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_igw
short_description: Manage an AWS VPC Internet gateway
description:
- Manage an AWS VPC Internet gateway
version_added: "2.0"
author: Robert Estelle (@erydo)
options:
vpc_id:
description:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
default: null
state:
description:
- Create or terminate the IGW
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID is can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
ec2_vpc_igw:
vpc_id: vpc-abcdefgh
state: present
register: igw
'''
import sys # noqa
# boto is an optional dependency. When executed by Ansible (__main__) we
# record the failure so main() can return a clean JSON error; when imported
# any other way (e.g. by tests) we re-raise immediately.
try:
    import boto.ec2
    import boto.vpc
    from boto.exception import EC2ResponseError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
    if __name__ != '__main__':
        raise
class AnsibleIGWException(Exception):
    """Raised when an EC2 operation on a VPC Internet Gateway fails."""
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    """Detach and delete any Internet Gateways attached to ``vpc_id``.

    Returns an Ansible-style result dict with a ``changed`` flag. In check
    mode nothing is modified; only whether a change would occur is reported.
    Raises AnsibleIGWException on EC2 API failures.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    if not attached:
        return {'changed': False}
    if check_mode:
        return {'changed': True}
    for gateway in attached:
        try:
            vpc_conn.detach_internet_gateway(gateway.id, vpc_id)
            vpc_conn.delete_internet_gateway(gateway.id)
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(e))
    return {'changed': True}
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    """Ensure exactly one Internet Gateway is attached to *vpc_id*.

    Returns an Ansible-style result dict carrying 'changed' and the
    'gateway_id' of the (possibly newly created) gateway.
    """
    existing = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    if len(existing) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))
    if existing:
        # A gateway is already attached; nothing to do.
        return {'changed': False, 'gateway_id': existing[0].id}
    if check_mode:
        return {'changed': True, 'gateway_id': None}
    try:
        gateway = vpc_conn.create_internet_gateway()
        vpc_conn.attach_internet_gateway(gateway.id, vpc_id)
        return {'changed': True, 'gateway_id': gateway.id}
    except EC2ResponseError as err:
        raise AnsibleIGWException(
            'Unable to create Internet Gateway, error: {0}'.format(err))
def main():
    """Module entry point: parse arguments, connect to AWS, apply state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id=dict(required=True),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        # 'except ... as e' (not the Python-2-only 'except ..., e') matches
        # the handlers elsewhere in this module and stays a syntax error-free
        # construct on Python 3.
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    # 'state' is constrained by argument_spec choices to present/absent.
    state = module.params.get('state', 'present')

    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id,
                                        check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id,
                                       check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__':
main()
| gpl-3.0 |
Arzie/deluge | deluge/tests/test_maketorrent.py | 3 | 2369 | import os
import tempfile
from twisted.trial import unittest
from deluge import maketorrent
def check_torrent(filename):
    """Sanity-check a .torrent file by parsing it with two independent readers."""
    # Test loading with libtorrent to make sure it's valid
    from deluge._libtorrent import lt
    lt.torrent_info(filename)
    # Test loading with our internal TorrentInfo class
    from deluge.ui.common import TorrentInfo
    TorrentInfo(filename)
class MakeTorrentTestCase(unittest.TestCase):
    """Tests for maketorrent.TorrentMetadata.save with single- and
    multi-file source data, with and without pad files."""

    def _make_data_dir(self):
        """Create a temp dir holding three files of differing sizes; return its path."""
        tmp_path = tempfile.mkdtemp()
        for name, char, kib in (("file_A", "a", 312),
                                ("file_B", "b", 2354),
                                ("file_C", "c", 11)):
            # 'with' guarantees the data is flushed and the handle closed
            # (the original relied on CPython refcounting to close the file).
            with open(os.path.join(tmp_path, name), "wb") as data_file:
                data_file.write(char * (kib * 1024))
        return tmp_path

    def _remove_data_dir(self, tmp_path):
        """Delete the three fixture files and the directory that held them."""
        for name in ("file_A", "file_B", "file_C"):
            os.remove(os.path.join(tmp_path, name))
        os.rmdir(tmp_path)

    def _make_torrent_path(self):
        """Create an empty .torrent temp file; close the fd mkstemp returns."""
        fd, tmp_file = tempfile.mkstemp(".torrent")
        os.close(fd)  # the original discarded this descriptor, leaking it
        return tmp_file

    def test_save_multifile(self):
        tmp_path = self._make_data_dir()
        t = maketorrent.TorrentMetadata()
        t.data_path = tmp_path
        tmp_file = self._make_torrent_path()
        t.save(tmp_file)
        check_torrent(tmp_file)
        self._remove_data_dir(tmp_path)
        os.remove(tmp_file)

    def test_save_singlefile(self):
        fd, tmp_data = tempfile.mkstemp("testdata")
        os.close(fd)
        with open(tmp_data, "wb") as data_file:
            data_file.write("a" * (2314 * 1024))
        t = maketorrent.TorrentMetadata()
        t.data_path = tmp_data
        tmp_file = self._make_torrent_path()
        t.save(tmp_file)
        check_torrent(tmp_file)
        os.remove(tmp_data)
        os.remove(tmp_file)

    def test_save_multifile_padded(self):
        tmp_path = self._make_data_dir()
        t = maketorrent.TorrentMetadata()
        t.data_path = tmp_path
        t.pad_files = True
        tmp_file = self._make_torrent_path()
        t.save(tmp_file)
        check_torrent(tmp_file)
        self._remove_data_dir(tmp_path)
        os.remove(tmp_file)
| gpl-3.0 |
ishay2b/tensorflow | tensorflow/python/util/keyword_args.py | 190 | 1657 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keyword args functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import decorator_utils
def keyword_args_only(func):
  """Decorator enforcing that the wrapped callable is invoked with keyword
  arguments only.

  Calling the decorated function with any positional argument raises a
  `ValueError`; this protects callers from silently supplying arguments in
  the wrong order.

  Args:
    func: The function or method needed to be decorated.

  Returns:
    Decorated function or method.

  Raises:
    ValueError: If `func` is not callable.
  """
  decorator_utils.validate_callable(func, "keyword_args_only")

  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    """Rejects positional arguments, then delegates to `func`."""
    if not args:
      return func(**kwargs)
    raise ValueError(
        "Must use keyword args to call {}.".format(func.__name__))

  return wrapper
| apache-2.0 |
JakubBrachTieto/openthread | tools/harness-automation/cases/med_6_3_2.py | 16 | 1869 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class MED_6_3_2(HarnessCase):
    """Thread test harness case 6.3.2 with the DUT acting as a MED
    (Minimal End Device); execution is driven by HarnessCase."""
    role = HarnessCase.ROLE_MED  # device role the DUT assumes for this case
    case = '6 3 2'  # harness test-case identifier
    golden_devices_required = 1  # number of reference devices needed
    def on_dialog(self, dialog, title):
        # No harness dialogs require special handling for this case.
        pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
aprefontaine/TMScheduler | django/contrib/contenttypes/tests.py | 60 | 1335 | """
Make sure that the content type cache (see ContentTypeManager) works correctly.
Lookups for a particular content type -- by model or by ID -- should hit the
database only on the first lookup.
First, let's make sure we're dealing with a blank slate (and that DEBUG is on so
that queries get logged)::
>>> from django.conf import settings
>>> settings.DEBUG = True
>>> from django.contrib.contenttypes.models import ContentType
>>> ContentType.objects.clear_cache()
>>> from django import db
>>> db.reset_queries()
At this point, a lookup for a ContentType should hit the DB::
>>> ContentType.objects.get_for_model(ContentType)
<ContentType: content type>
>>> len(db.connection.queries)
1
A second hit, though, won't hit the DB, nor will a lookup by ID::
>>> ct = ContentType.objects.get_for_model(ContentType)
>>> len(db.connection.queries)
1
>>> ContentType.objects.get_for_id(ct.id)
<ContentType: content type>
>>> len(db.connection.queries)
1
Once we clear the cache, another lookup will again hit the DB::
>>> ContentType.objects.clear_cache()
>>> ContentType.objects.get_for_model(ContentType)
<ContentType: content type>
>>> len(db.connection.queries)
2
Don't forget to reset DEBUG!
>>> settings.DEBUG = False
""" | bsd-3-clause |
brendangregg/bcc | tools/offcputime.py | 2 | 11671 | #!/usr/bin/python
#
# offcputime Summarize off-CPU time by stack trace
# For Linux, uses BCC, eBPF.
#
# USAGE: offcputime [-h] [-p PID | -u | -k] [-U | -K] [-f] [duration]
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 13-Jan-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF
from sys import stderr
from time import sleep, strftime
import argparse
import errno
import signal
# arg validation
def positive_int(val):
    """argparse type: parse *val* as a non-negative integer.

    Raises argparse.ArgumentTypeError when *val* is not an integer or is
    negative (zero is accepted).
    """
    try:
        parsed = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")
    if parsed >= 0:
        return parsed
    raise argparse.ArgumentTypeError("must be positive")
def positive_nonzero_int(val):
    """argparse type: like positive_int, but additionally rejects zero."""
    parsed = positive_int(val)
    if parsed == 0:
        raise argparse.ArgumentTypeError("must be nonzero")
    return parsed
def stack_id_err(stack_id):
    """Return True when *stack_id* is a get_stackid() error worth reporting.

    -EFAULT normally just means the stack trace was unavailable (e.g.
    requesting a kernel stack while in userspace code), so it is excluded.
    """
    return stack_id < 0 and stack_id != -errno.EFAULT
# arguments
examples = """examples:
./offcputime # trace off-CPU stack time until Ctrl-C
./offcputime 5 # trace for 5 seconds only
./offcputime -f 5 # 5 seconds, and output in folded format
./offcputime -m 1000 # trace only events that last more than 1000 usec
./offcputime -M 10000 # trace only events that last less than 10000 usec
./offcputime -p 185 # only trace threads for PID 185
./offcputime -t 188 # only trace thread 188
./offcputime -u # only trace user threads (no kernel)
./offcputime -k # only trace kernel threads (no user)
./offcputime -U # only show user space stacks (no kernel)
./offcputime -K # only show kernel space stacks (no user)
"""
parser = argparse.ArgumentParser(
description="Summarize off-CPU time by stack trace",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
# Note: this script provides --pid and --tid flags but their arguments are
# referred to internally using kernel nomenclature: TGID and PID.
thread_group.add_argument("-p", "--pid", metavar="PID", dest="tgid",
help="trace this PID only", type=positive_int)
thread_group.add_argument("-t", "--tid", metavar="TID", dest="pid",
help="trace this TID only", type=positive_int)
thread_group.add_argument("-u", "--user-threads-only", action="store_true",
help="user threads only (no kernel threads)")
thread_group.add_argument("-k", "--kernel-threads-only", action="store_true",
help="kernel threads only (no user threads)")
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-d", "--delimited", action="store_true",
help="insert delimiter between kernel/user stacks")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format")
parser.add_argument("--stack-storage-size", default=1024,
type=positive_nonzero_int,
help="the number of unique stack traces that can be stored and "
"displayed (default 1024)")
parser.add_argument("duration", nargs="?", default=99999999,
type=positive_nonzero_int,
help="duration of trace, in seconds")
parser.add_argument("-m", "--min-block-time", default=1,
type=positive_nonzero_int,
help="the amount of time in microseconds over which we " +
"store traces (default 1)")
parser.add_argument("-M", "--max-block-time", default=(1 << 64) - 1,
type=positive_nonzero_int,
help="the amount of time in microseconds under which we " +
"store traces (default U64_MAX)")
parser.add_argument("--state", type=positive_int,
help="filter on this thread state bitmask (eg, 2 == TASK_UNINTERRUPTIBLE" +
") see include/linux/sched.h")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
folded = args.folded
duration = int(args.duration)
debug = 0
# signal handler
def signal_ignore(signal, frame):
    # SIGINT handler installed during cleanup: swallow Ctrl-C and emit a
    # newline so the summary output starts on a fresh line.
    print()
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
#define MINBLOCK_US MINBLOCK_US_VALUEULL
#define MAXBLOCK_US MAXBLOCK_US_VALUEULL
struct key_t {
u32 pid;
u32 tgid;
int user_stack_id;
int kernel_stack_id;
char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
int oncpu(struct pt_regs *ctx, struct task_struct *prev) {
u32 pid = prev->pid;
u32 tgid = prev->tgid;
u64 ts, *tsp;
// record previous thread sleep time
if ((THREAD_FILTER) && (STATE_FILTER)) {
ts = bpf_ktime_get_ns();
start.update(&pid, &ts);
}
// get the current thread's start time
pid = bpf_get_current_pid_tgid();
tgid = bpf_get_current_pid_tgid() >> 32;
tsp = start.lookup(&pid);
if (tsp == 0) {
return 0; // missed start or filtered
}
// calculate current thread's delta time
u64 delta = bpf_ktime_get_ns() - *tsp;
start.delete(&pid);
delta = delta / 1000;
if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) {
return 0;
}
// create map key
struct key_t key = {};
key.pid = pid;
key.tgid = tgid;
key.user_stack_id = USER_STACK_GET;
key.kernel_stack_id = KERNEL_STACK_GET;
bpf_get_current_comm(&key.name, sizeof(key.name));
counts.increment(key, delta);
return 0;
}
"""
# set thread filter
thread_context = ""
if args.tgid is not None:
thread_context = "PID %d" % args.tgid
thread_filter = 'tgid == %d' % args.tgid
elif args.pid is not None:
thread_context = "TID %d" % args.pid
thread_filter = 'pid == %d' % args.pid
elif args.user_threads_only:
thread_context = "user threads"
thread_filter = '!(prev->flags & PF_KTHREAD)'
elif args.kernel_threads_only:
thread_context = "kernel threads"
thread_filter = 'prev->flags & PF_KTHREAD'
else:
thread_context = "all threads"
thread_filter = '1'
if args.state == 0:
state_filter = 'prev->state == 0'
elif args.state:
# these states are sometimes bitmask checked
state_filter = 'prev->state & %d' % args.state
else:
state_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
bpf_text = bpf_text.replace('STATE_FILTER', state_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
bpf_text = bpf_text.replace('MINBLOCK_US_VALUE', str(args.min_block_time))
bpf_text = bpf_text.replace('MAXBLOCK_US_VALUE', str(args.max_block_time))
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(ctx, 0)"
user_stack_get = "stack_traces.get_stackid(ctx, BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
stack_context = "user"
kernel_stack_get = "-1"
elif args.kernel_stacks_only:
stack_context = "kernel"
user_stack_get = "-1"
else:
stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
need_delimiter = args.delimited and not (args.kernel_stacks_only or
args.user_stacks_only)
# check for an edge case; the code below will handle this case correctly
# but ultimately nothing will be displayed
if args.kernel_threads_only and args.user_stacks_only:
print("ERROR: Displaying user stacks for kernel threads " +
"doesn't make sense.", file=stderr)
exit(1)
if debug or args.ebpf:
print(bpf_text)
if args.ebpf:
exit()
# initialize BPF
b = BPF(text=bpf_text)
b.attach_kprobe(event="finish_task_switch", fn_name="oncpu")
matched = b.num_open_kprobes()
if matched == 0:
print("error: 0 functions traced. Exiting.", file=stderr)
exit(1)
# header
if not folded:
print("Tracing off-CPU time (us) of %s by %s stack" %
(thread_context, stack_context), end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take many seconds, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
if not folded:
print()
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
# handle get_stackid errors
if not args.user_stacks_only and stack_id_err(k.kernel_stack_id):
missing_stacks += 1
has_enomem = has_enomem or k.kernel_stack_id == -errno.ENOMEM
if not args.kernel_stacks_only and stack_id_err(k.user_stack_id):
missing_stacks += 1
has_enomem = has_enomem or k.user_stack_id == -errno.ENOMEM
# user stacks will be symbolized by tgid, not pid, to avoid the overhead
# of one symbol resolver per thread
user_stack = [] if k.user_stack_id < 0 else \
stack_traces.walk(k.user_stack_id)
kernel_stack = [] if k.kernel_stack_id < 0 else \
stack_traces.walk(k.kernel_stack_id)
if folded:
# print folded stack output
user_stack = list(user_stack)
kernel_stack = list(kernel_stack)
line = [k.name.decode('utf-8', 'replace')]
# if we failed to get the stack is, such as due to no space (-ENOMEM) or
# hash collision (-EEXIST), we still print a placeholder for consistency
if not args.kernel_stacks_only:
if stack_id_err(k.user_stack_id):
line.append("[Missed User Stack]")
else:
line.extend([b.sym(addr, k.tgid).decode('utf-8', 'replace')
for addr in reversed(user_stack)])
if not args.user_stacks_only:
line.extend(["-"] if (need_delimiter and k.kernel_stack_id >= 0 and k.user_stack_id >= 0) else [])
if stack_id_err(k.kernel_stack_id):
line.append("[Missed Kernel Stack]")
else:
line.extend([b.ksym(addr).decode('utf-8', 'replace')
for addr in reversed(kernel_stack)])
print("%s %d" % (";".join(line), v.value))
else:
# print default multi-line stack output
if not args.user_stacks_only:
if stack_id_err(k.kernel_stack_id):
print(" [Missed Kernel Stack]")
else:
for addr in kernel_stack:
print(" %s" % b.ksym(addr))
if not args.kernel_stacks_only:
if need_delimiter and k.user_stack_id >= 0 and k.kernel_stack_id >= 0:
print(" --")
if stack_id_err(k.user_stack_id):
print(" [Missed User Stack]")
else:
for addr in user_stack:
print(" %s" % b.sym(addr, k.tgid))
print(" %-16s %s (%d)" % ("-", k.name.decode('utf-8', 'replace'), k.pid))
print(" %d\n" % v.value)
if missing_stacks > 0:
enomem_str = "" if not has_enomem else \
" Consider increasing --stack-storage-size."
print("WARNING: %d stack traces lost and could not be displayed.%s" %
(missing_stacks, enomem_str),
file=stderr)
| apache-2.0 |
GNOME/accerciser | src/lib/accerciser/hotkey_manager.py | 1 | 14276 | '''
Defines the manager for global hot keys.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2006, 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository.Gio import Settings as GSettings
from .i18n import _
import pyatspi
HOTKEYS_GSCHEMA = 'org.a11y.Accerciser.hotkeys'
HOTKEYS_BASEPATH = '/org/a11y/accerciser/hotkeys/'
COL_COMPONENT = 0
COL_DESC = 1
COL_CALLBACK = 2
COL_KEYPRESS = 3
COL_MOD = 4
COL_LOCALIZED_COMP = 5
def _charToKeySym(key):
  '''
  A convenience function to convert either a character, or key name to its
  respective keyval
  @param key: The character or key name to convert.
  @type key: string
  @return: A key symbol
  @rtype: long
  '''
  try:
    rv = gdk.unicode_to_keyval(ord(key))
  except (TypeError, ValueError):
    # Not a single character (ord() rejects it): treat it as a key name
    # such as 'F1' and look up the corresponding gdk.KEY_* constant.
    # Narrowed from a bare 'except:' which also hid unrelated failures.
    rv = getattr(gdk, 'KEY_%s' % key)
  return rv
class HotkeyManager(gtk.ListStore):
  '''
  A model that stores all of the global key bindings. All accerciser components
  that need global hotkeys should register the key combination and callback
  with the main instance of this class.

  Row layout follows the module-level COL_* constants: component,
  description, callback, keysym, modifier mask, localized component name.
  '''
  def __init__(self):
    '''
    Constructor for the L{HotkeyManager}
    '''
    gtk.ListStore.__init__(self, str, str, object, int, int, str)
    # Persist combo edits to GSettings whenever a row changes.
    self.connect('row-changed', self._onComboChanged)
    # Listen for key presses under every modifier combination so hotkeys
    # fire regardless of which modifiers are held.
    masks = [mask for mask in pyatspi.allModifiers()]
    pyatspi.Registry.registerKeystrokeListener(
      self._accEventKeyPressed, mask=masks, kind=(pyatspi.KEY_PRESSED_EVENT,))
  def _accEventKeyPressed(self, event):
    '''
    Handle certain key presses globally. Pass on to the hotkey manager the
    key combinations pressed for further processing.
    @param event: The event that is being handled.
    @type event: L{pyatspi.event.Event}
    '''
    handled = self.hotkeyPress(event.hw_code, event.modifiers)
    # Consume the event only when a registered combo handled it.
    event.consume = handled
  def hotkeyPress(self, key, modifiers):
    '''
    Call the appropriate callbacks for given key combination. This method
    should be called by an at-spi keyboard:press event handler in the
    main program.
    @param key: The pressed key code.
    @type key: integer
    @param modifiers: The modifiers that were depressed during the keystroke.
    @type modifiers: integer
    @return: True if a callback was invoked for the combination.
    @rtype: boolean
    '''
    km = gdk.Keymap.get_default()
    callback = None
    for combo in self:
      success, entries = km.get_entries_for_keyval(combo[COL_KEYPRESS])
      if not success: continue
      # Match on hardware keycode; require at least the stored modifiers.
      if key in [int(entry.keycode) for entry in entries] and \
            modifiers & combo[COL_MOD] == combo[COL_MOD]:
        callback = combo[COL_CALLBACK]
    if callback:
      callback()
    return bool(callback)
  def addKeyCombo(self, component, localized_component, description,
                  callback, keypress, modifiers):
    '''
    Adds the given key combination with the appropriate callbacks to
    the L{HotkeyManager}. If an identical description with the identical
    component already exists in the model, just reassign with the new callback.
    I{Note:} It is important that the component and description strings be
    unique.
    @param component: The component name, usually the plugin name, or "Core".
    @type component: string
    @param localized_component: Translated component name for display.
    @type localized_component: string
    @param description: A description of the action performed during the given
    keycombo.
    @type description: string
    @param callback: The callback to call when the given key combination
    is pressed.
    @type callback: callable
    @param keypress: The key symbol of the keystroke that performs given operation.
    @type keypress: long
    @param modifiers: The modifiers that must be depressed for function to
    be performed.
    @type modifiers: int
    '''
    component_desc_pairs = list(zip([row[COL_COMPONENT] for row in self],
                                    [row[COL_DESC] for row in self]))
    if (component, description) in component_desc_pairs:
      path = component_desc_pairs.index((component, description))
      self[path][COL_CALLBACK] = callback
    else:
      # First registration: prefer a combo previously saved in GSettings
      # over the caller-supplied default.
      gspath = self._getComboGSettingsPath(component, description)
      gsettings = GSettings.new_with_path(HOTKEYS_GSCHEMA, gspath)
      if gsettings.get_string('hotkey-combo'):
        final_keypress, final_modifiers = gtk.accelerator_parse(
          gsettings.get_string('hotkey-combo'))
      else:
        final_keypress, final_modifiers = keypress, modifiers
      self.append([component, description, callback,
                   int(final_keypress), final_modifiers, localized_component])
  def removeKeyCombo(self, component, description, callback, key, modifiers):
    '''
    Removes the given callback from L{HotkeyManager}. It does not erase the
    entire key combo entry.
    @param component: The component name, usually the plugin name, or "Core".
    @type component: string
    @param description: A description of the action performed during the given
    keycombo.
    @type description: string
    @param callback: The callback to call when the given key combination
    is pressed.
    @type callback: callable
    @param key: The key symbol of the keystroke that performs given operation.
    @type key: long
    @param modifiers: The modifiers that must be depressed for function to
    be performed.
    @type modifiers: int
    '''
    iter = self.get_iter_first()
    while iter:
      if self[iter][COL_CALLBACK] == callback:
        # We never really remove the row; just clear the callback (rows
        # with an empty callback are hidden by HotkeyTreeView's filter).
        self[iter][COL_CALLBACK] = ''
      iter = self.iter_next(iter)
  def _onComboChanged(self, model, path, iter):
    '''
    Callback for row changes. Copies the changed key combos over to gsettings.
    @param model: The model that emitted the signal. Should be this class instance.
    @type model: L{gtk.TreeModel}
    @param path: The path of the row that has changed.
    @type path: tuple
    @param iter: The iter of the row that has changed.
    @type iter: L{gtk.TreeIter}
    '''
    if not model[iter][COL_COMPONENT] or not model[iter][COL_DESC]:
      return
    gspath = self._getComboGSettingsPath(model[iter][COL_COMPONENT],
                                         model[iter][COL_DESC])
    gsettings = GSettings.new_with_path(HOTKEYS_GSCHEMA, gspath)
    combo_name = gtk.accelerator_name(model[iter][COL_KEYPRESS],
                                      gdk.ModifierType(model[iter][COL_MOD]))
    key = gsettings.get_string('hotkey-combo')
    # NOTE(review): '/' looks like a sentinel for "unset" here -- confirm.
    if key != combo_name and key != '/':
      gsettings.set_string('hotkey-combo', combo_name)
  def _getComboGSettingsPath(self, component, description):
    '''
    Useful method that build and returns a gsettings path for a key combo.
    @param component: The component of the hotkey.
    @type component: string
    @param description: The description of the hotkey action
    @type description: string
    @return: A full gsettings path
    @rtype: string
    '''
    dash_component = self.__dasherize(component)
    dash_description = self.__dasherize(description)
    path = '/'.join([dash_component, dash_description])
    return HOTKEYS_BASEPATH + path + '/'
  def __dasherize(self, item):
    '''
    This method dasherizes and decapitalizes a given string.
    @param item: The given string
    @type item: string
    @return: A dasherized and decapitalized string
    @rtype: string
    '''
    return item.lower().replace(' ', '-')
class HotkeyTreeView(gtk.TreeView):
  '''
  A tree view of the various global hotkey combinations. The keys and
  modifiers could also be changed through this widget.
  '''
  def __init__(self, hotkey_manager):
    '''
    Construct the tree view with the given L{HotkeyManager}.
    @ivar hotkey_manager: The manager we wish to view.
    @type hotkey_manager: L{HotkeyManager}
    @param hotkey_manager: The manager we wish to view.
    @type hotkey_manager: L{HotkeyManager}
    '''
    gtk.TreeView.__init__(self)
    self.hotkey_manager = hotkey_manager
    # Filter out rows whose callback was cleared (e.g. a disabled plugin).
    modelfilter = self.hotkey_manager.filter_new(None)
    modelfilter.set_visible_func(self._rowVisibleFunc, None)
    self.set_model(modelfilter)
    crt = gtk.CellRendererText()
    tvc = gtk.TreeViewColumn(_('Component'))
    tvc.pack_start(crt, True)
    tvc.add_attribute(crt, 'text', COL_COMPONENT)
    tvc.set_cell_data_func(crt, self._componentDataFunc, COL_COMPONENT)
    self.append_column(tvc)
    crt = gtk.CellRendererText()
    tvc = gtk.TreeViewColumn(_('Task'))
    tvc.pack_start(crt, True)
    tvc.add_attribute(crt, 'text', COL_DESC)
    tvc.set_cell_data_func(crt, self._translateDataFunc, COL_DESC)
    self.append_column(tvc)
    # Editable column showing the key name for the combo.
    crt = gtk.CellRendererText()
    tvc = gtk.TreeViewColumn(_('Key'))
    tvc.set_min_width(64)
    tvc.pack_start(crt, True)
    crt.props.editable = True
    tvc.add_attribute(crt, 'text', COL_KEYPRESS)
    tvc.set_cell_data_func(crt, self._keyCellFunc)
    crt.connect('edited', self._onKeyChanged)
    self.append_column(tvc)
    # One toggle column per supported modifier (Alt, Ctrl, Shift).
    crt = gtk.CellRendererToggle()
    tvc = gtk.TreeViewColumn(_('Alt'))
    tvc.pack_start(crt, True)
    tvc.set_cell_data_func(crt, self._modCellFunc, gdk.ModifierType.MOD1_MASK)
    crt.connect('toggled', self._onModToggled, gdk.ModifierType.MOD1_MASK)
    self.append_column(tvc)
    crt = gtk.CellRendererToggle()
    tvc = gtk.TreeViewColumn(_('Ctrl'))
    tvc.pack_start(crt, True)
    tvc.set_cell_data_func(crt, self._modCellFunc, \
                           gdk.ModifierType.CONTROL_MASK)
    crt.connect('toggled', self._onModToggled, gdk.ModifierType.CONTROL_MASK)
    self.append_column(tvc)
    crt = gtk.CellRendererToggle()
    tvc = gtk.TreeViewColumn(_('Shift'))
    tvc.pack_start(crt, True)
    tvc.set_cell_data_func(crt, self._modCellFunc, gdk.ModifierType.SHIFT_MASK)
    crt.connect('toggled', self._onModToggled, gdk.ModifierType.SHIFT_MASK)
    self.append_column(tvc)
  def _translateDataFunc(self, column, cell, model, iter, column_id):
    '''
    Show the task description as a translated string.
    @param column: The treeview column of the cell renderer.
    @type column: L{gtk.TreeViewColumn}
    @param cell: The cell renderer we need to modify.
    @type cell: L{gtk.CellRendererText}
    @param model: The treeview's model.
    @type model: L{gtk.ListStore}
    @param iter: The iter of the given cell data.
    @type iter: L{gtk.TreeIter}
    '''
    cell.set_property('text', _(model[iter][column_id]))
  def _componentDataFunc(self, column, cell, model, iter, column_id):
    '''
    Show the component name as a translated string.
    @param column: The treeview column of the cell renderer.
    @type column: L{gtk.TreeViewColumn}
    @param cell: The cell renderer we need to modify.
    @type cell: L{gtk.CellRendererText}
    @param model: The treeview's model.
    @type model: L{gtk.ListStore}
    @param iter: The iter of the given cell data.
    @type iter: L{gtk.TreeIter}
    '''
    cell.set_property('text', model[iter][COL_LOCALIZED_COMP] or \
                        model[iter][COL_COMPONENT])
  def _keyCellFunc(self, column, cell, model, iter, foo=None):
    '''
    Show the key symbol as a string for easy readability.
    @param column: The treeview column of the cell renderer.
    @type column: L{gtk.TreeViewColumn}
    @param cell: The cell renderer we need to modify.
    @type cell: L{gtk.CellRendererText}
    @param model: The treeview's model.
    @type model: L{gtk.ListStore}
    @param iter: The iter of the given cell data.
    @type iter: L{gtk.TreeIter}
    '''
    if model[iter][COL_KEYPRESS] > 0:
      cell.set_property('text',
                        gdk.keyval_name(model[iter][COL_KEYPRESS]))
      cell.set_property('sensitive', True)
    else:
      # No key assigned yet; show an insensitive placeholder.
      cell.set_property('text', '<select key>')
      cell.set_property('sensitive', False)
  def _modCellFunc(self, column, cell, model, iter, mask):
    '''
    Show the given modifier mask as toggled or not.
    @param column: The treeview column of the cell renderer.
    @type column: L{gtk.TreeViewColumn}
    @param cell: The cell renderer we need to modify.
    @type cell: L{gtk.CellRendererToggle}
    @param model: The treeview's model.
    @type model: L{gtk.ListStore}
    @param iter: The iter of the given cell data.
    @type iter: L{gtk.TreeIter}
    @param mask: A modifier mask.
    @type mask: integer
    '''
    cell.set_property('active', bool(mask & model[iter][COL_MOD]))
  def _onKeyChanged(self, cellrenderertext, path, new_text):
    '''
    A callback for the key cellrenderer when 'edited'. Model must be
    changed accordingly.
    @param cellrenderertext: The cell renderer that emitted the signal
    @type cellrenderertext: L{gtk.CellRendererText}
    @param path: Path of the edited cellrenderer.
    @type path: tuple
    @param new_text: The new text that was entered.
    @type new_text: string
    '''
    keysym = -1
    if new_text:
      try:
        keysym = _charToKeySym(new_text)
      # NOTE(review): bare except; falls back to the first character when
      # the full text is not a valid character or key name.
      except:
        keysym = _charToKeySym(new_text[0])
    self.hotkey_manager[path][COL_KEYPRESS] = int(keysym)
  def _onModToggled(self, renderer_toggle, path, mask):
    '''
    A callback for the modifiers' cellrenderers when 'toggled'.
    Model must be changed accordingly.
    @param renderer_toggle: The cell renderer that emitted the signal
    @type renderer_toggle: L{gtk.CellRendererToggle}
    @param path: Path of the edited cellrenderer.
    @type path: tuple
    @param mask: Modifier mask that must be inverted.
    @type mask: integer
    '''
    self.hotkey_manager[path][COL_MOD] ^= mask
  def _rowVisibleFunc(self, model, iter, foo=None):
    '''
    A filter function to hide the rows that do not contain valid callbacks.
    This is usually the case when a plugin is disabled.
    @param model: The view's model.
    @type model: L{gtk.ListStore}
    @param iter: The iter of the row in question.
    @type iter: L{gtk.TreeIter}
    @return: True if row should be displayed.
    @rtype: boolean
    '''
    return bool(model[iter][COL_CALLBACK])
| bsd-3-clause |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/sqlite3/test/transactions.py | 51 | 6698 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/transactions.py: tests transactions
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import sys
import os, unittest
import sqlite3 as sqlite
def get_db_path():
    """Name of the throwaway on-disk database file shared by the tests."""
    return "sqlite_testdb"
class TransactionTests(unittest.TestCase):
    """Exercise sqlite3's implicit transaction handling using two
    independent connections (con1 writes, con2 observes) on a shared
    on-disk database.

    Modernized: the long-deprecated ``failUnlessEqual`` alias is replaced
    by ``assertEqual`` and bare ``except:`` clauses are narrowed to
    ``except Exception:``.  Test method names keep the ``Check`` prefix
    that ``suite()`` below discovers.
    """

    def setUp(self):
        # Fresh database file per test; a short lock timeout makes lock
        # conflicts raise quickly instead of blocking the test run.
        try:
            os.remove(get_db_path())
        except OSError:
            pass

        self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
        self.cur1 = self.con1.cursor()

        self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
        self.cur2 = self.con2.cursor()

    def tearDown(self):
        self.cur1.close()
        self.con1.close()
        self.cur2.close()
        self.con2.close()
        try:
            os.unlink(get_db_path())
        except OSError:
            pass

    def CheckDMLdoesAutoCommitBefore(self):
        """A DDL statement (CREATE TABLE) implicitly commits the pending
        INSERT, so con2 sees the inserted row."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur1.execute("create table test2(j)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckInsertStartsTransaction(self):
        """INSERT opens a transaction; the row is invisible to con2
        until committed."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 0)

    def CheckUpdateStartsTransaction(self):
        """UPDATE opens a transaction; con2 still sees the old value."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("update test set i=6")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchone()[0]
        self.assertEqual(res, 5)

    def CheckDeleteStartsTransaction(self):
        """DELETE opens a transaction; con2 still sees the row."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("delete from test")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckReplaceStartsTransaction(self):
        """REPLACE opens a transaction; con2 still sees the old row."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.commit()
        self.cur1.execute("replace into test(i) values (6)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0][0], 5)

    def CheckToggleAutoCommit(self):
        """Setting isolation_level to None enables autocommit (con2 sees
        the row immediately); restoring "DEFERRED" re-enables implicit
        transactions (the second insert stays invisible)."""
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        self.con1.isolation_level = None
        self.assertEqual(self.con1.isolation_level, None)
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

        self.con1.isolation_level = "DEFERRED"
        self.assertEqual(self.con1.isolation_level, "DEFERRED")
        self.cur1.execute("insert into test(i) values (5)")
        self.cur2.execute("select i from test")
        res = self.cur2.fetchall()
        self.assertEqual(len(res), 1)

    def CheckRaiseTimeout(self):
        """A write on con2 while con1 holds the write lock must raise
        OperationalError once the 0.1s busy timeout expires."""
        if sqlite.sqlite_version_info < (3, 2, 2):
            # This will fail (hang) on earlier versions of sqlite.
            # Determine exact version it was fixed. 3.2.1 hangs.
            return
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        try:
            self.cur2.execute("insert into test(i) values (5)")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
        except Exception:
            self.fail("should have raised an OperationalError")

    def CheckLocking(self):
        """
        This tests the improved concurrency with pysqlite 2.3.4. You needed
        to roll back con2 before you could commit con1.
        """
        if sqlite.sqlite_version_info < (3, 2, 2):
            # This will fail (hang) on earlier versions of sqlite.
            # Determine exact version it was fixed. 3.2.1 hangs.
            return
        self.cur1.execute("create table test(i)")
        self.cur1.execute("insert into test(i) values (5)")
        try:
            self.cur2.execute("insert into test(i) values (5)")
            self.fail("should have raised an OperationalError")
        except sqlite.OperationalError:
            pass
        except Exception:
            self.fail("should have raised an OperationalError")
        # NO self.con2.rollback() HERE!!!
        self.con1.commit()
class SpecialCommandTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
def CheckVacuum(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("vacuum")
def CheckDropTable(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("drop table test")
def CheckPragma(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("pragma count_changes=1")
def tearDown(self):
self.cur.close()
self.con.close()
def suite():
    """Bundle every Check* method from both test classes into one suite."""
    transaction_suite = unittest.makeSuite(TransactionTests, "Check")
    special_suite = unittest.makeSuite(SpecialCommandTests, "Check")
    return unittest.TestSuite((transaction_suite, special_suite))
def test():
    """Run the combined suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())

if __name__ == "__main__":
    test()
| apache-2.0 |
rhndg/openedx | common/djangoapps/track/tests/test_logs.py | 163 | 2712 | """Tests that tracking data are successfully logged"""
import mock
import unittest
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from track.models import TrackingLog
from track.views import user_track
@unittest.skip("TODO: these tests were not being run before, and now that they are they're failing")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TrackingTest(TestCase):
    """
    Tests that tracking logs correctly handle events
    """

    def _assert_requests_logged(self, method):
        """
        Submit two representative event payloads via *method* (the test
        client's get or post) and assert each lands in the TrackingLog
        table with its fields intact.  Extracted because the POST and GET
        tests were byte-level duplicates apart from the client method.
        """
        requests = [
            {"event": "my_event", "event_type": "my_event_type", "page": "my_page"},
            {"event": "{'json': 'object'}", "event_type": unichr(512), "page": "my_page"}
        ]
        with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_SQL_TRACKING_LOGS': True}):
            for request_params in requests:
                response = method(reverse(user_track), request_params)
                self.assertEqual(response.status_code, 200)
                self.assertEqual(response.content, 'success')
                # Newest log entry must reflect exactly what was submitted.
                tracking_logs = TrackingLog.objects.order_by('-dtcreated')
                log = tracking_logs[0]
                self.assertEqual(log.event, request_params["event"])
                self.assertEqual(log.event_type, request_params["event_type"])
                self.assertEqual(log.page, request_params["page"])

    def test_post_answers_to_log(self):
        """
        Checks that student answer requests submitted to track.views via POST
        are correctly logged in the TrackingLog db table
        """
        self._assert_requests_logged(self.client.post)

    def test_get_answers_to_log(self):
        """
        Checks that student answer requests submitted to track.views via GET
        are correctly logged in the TrackingLog db table
        """
        self._assert_requests_logged(self.client.get)
| agpl-3.0 |
chris-allan/openmicroscopy | components/xsd-fu/generateDS/Demos/Xmlbehavior/xmlbehavior.py | 33 | 29105 | #!/usr/bin/env python
#
# Generated Wed Jun 30 10:34:05 2004 by generateDS.py.
#
import sys
import getopt
from xml.dom import minidom
from xml.dom import Node
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Support/utility functions.
#
def showIndent(outfile, level):
    # Write one fixed-width indent unit per nesting level.
    # NOTE(review): the indent literal below may have been mangled by
    # whitespace normalization in this copy -- confirm the intended
    # unit against the generateDS.py generator output.
    for idx in range(level):
        outfile.write('    ')
def quote_xml(inStr):
    """Escape the XML-special characters for element content and
    attribute values.

    Bug fix: as written, every replace() mapped a character to itself and
    the function was a no-op.  '&' must be escaped first so the entities
    produced by the later substitutions are not double-escaped.
    """
    s1 = inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('"', '&quot;')
    return s1
def quote_python(inStr):
    """Return *inStr* wrapped as a Python string literal, choosing a
    quote style that minimizes escaping."""
    text = inStr
    if "'" not in text:
        # No apostrophes: single quotes are safe; triple-quote when the
        # text spans multiple lines.
        if '\n' in text:
            return "'''%s'''" % text
        return "'%s'" % text
    # Apostrophes present: switch to double quotes, escaping any
    # embedded double quotes first.
    if '"' in text:
        text = text.replace('"', '\\"')
    if '\n' in text:
        return '"""%s"""' % text
    return '"%s"' % text
#
# Data representation classes.
#
class xml_behavior:
    """Root element of the xml-behavior document: a base implementation
    URL plus an optional <behaviors> container.  Generated by
    generateDS.py; kept byte-identical apart from documentation."""
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, base_impl_url='', behaviors=None):
        self.base_impl_url = base_impl_url
        self.behaviors = behaviors
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if xml_behavior.subclass:
            return xml_behavior.subclass(*args_, **kwargs_)
        else:
            return xml_behavior(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getBase_impl_url(self): return self.base_impl_url
    def setBase_impl_url(self, base_impl_url): self.base_impl_url = base_impl_url
    def getBehaviors(self): return self.behaviors
    def setBehaviors(self, behaviors): self.behaviors = behaviors
    def export(self, outfile, level, name_='xml-behavior'):
        # Serialize this element (and children) as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        showIndent(outfile, level)
        outfile.write('<base-impl-url>%s</base-impl-url>\n' % quote_xml(self.getBase_impl_url()))
        if self.behaviors:
            self.behaviors.export(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='xml-behavior'):
        # Emit Python constructor-literal source for this element.
        level += 1
        showIndent(outfile, level)
        outfile.write('base_impl_url=%s,\n' % quote_python(self.getBase_impl_url()))
        if self.behaviors:
            showIndent(outfile, level)
            outfile.write('behaviors=behaviors(\n')
            self.behaviors.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
    def build(self, node_):
        # Populate this instance from a minidom element node.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'base-impl-url':
                base_impl_url = ''
                for text_ in child.childNodes:
                    base_impl_url += text_.nodeValue
                self.base_impl_url = base_impl_url
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'behaviors':
                obj = behaviors.factory()
                obj.build(child)
                self.setBehaviors(obj)
# end class xml_behavior
class behaviors:
    """<behaviors> container: an ordered list of behavior elements.
    Generated by generateDS.py; kept byte-identical apart from docs."""
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, behavior=None):
        if behavior is None:
            self.behavior = []
        else:
            self.behavior = behavior
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if behaviors.subclass:
            return behaviors.subclass(*args_, **kwargs_)
        else:
            return behaviors(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getBehavior(self): return self.behavior
    def addBehavior(self, value): self.behavior.append(value)
    def setBehavior(self, index, value): self.behavior[index] = value
    def export(self, outfile, level, name_='behaviors'):
        # Serialize all child behavior elements as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        for behavior in self.behavior:
            behavior.export(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='behaviors'):
        # Emit Python constructor-literal source for the child list.
        level += 1
        showIndent(outfile, level)
        outfile.write('behavior=[\n')
        level += 1
        for behavior in self.behavior:
            showIndent(outfile, level)
            outfile.write('behavior(\n')
            behavior.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node, one child per <behavior>.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'behavior':
                obj = behavior.factory()
                obj.build(child)
                self.behavior.append(obj)
# end class behaviors
class behavior:
    """<behavior> element: one documented behavior with its class, name,
    return type, argument list, implementation URL and ancillaries.

    Bug fix: export()/exportLiteral() called the nonexistent getKlass();
    the accessor generated for the 'class' attribute is getClass().
    """
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, klass='', name='', return_type='', args=None, impl_url='', ancillaries=None):
        self.klass = klass
        self.name = name
        self.return_type = return_type
        self.args = args
        self.impl_url = impl_url
        self.ancillaries = ancillaries
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if behavior.subclass:
            return behavior.subclass(*args_, **kwargs_)
        else:
            return behavior(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getClass(self): return self.klass
    def setClass(self, klass): self.klass = klass
    def getName(self): return self.name
    def setName(self, name): self.name = name
    def getReturn_type(self): return self.return_type
    def setReturn_type(self, return_type): self.return_type = return_type
    def getArgs(self): return self.args
    def setArgs(self, args): self.args = args
    def getImpl_url(self): return self.impl_url
    def setImpl_url(self, impl_url): self.impl_url = impl_url
    def getAncillaries(self): return self.ancillaries
    def setAncillaries(self, ancillaries): self.ancillaries = ancillaries
    def export(self, outfile, level, name_='behavior'):
        # Serialize this element (and children) as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        showIndent(outfile, level)
        # Bug fix: was self.getKlass(), which raises AttributeError.
        outfile.write('<class>%s</class>\n' % quote_xml(self.getClass()))
        showIndent(outfile, level)
        outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
        showIndent(outfile, level)
        outfile.write('<return-type>%s</return-type>\n' % quote_xml(self.getReturn_type()))
        if self.args:
            self.args.export(outfile, level)
        showIndent(outfile, level)
        outfile.write('<impl-url>%s</impl-url>\n' % quote_xml(self.getImpl_url()))
        if self.ancillaries:
            self.ancillaries.export(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='behavior'):
        # Emit Python constructor-literal source for this element.
        level += 1
        showIndent(outfile, level)
        # Bug fix: was self.getKlass(), which raises AttributeError.
        outfile.write('klass=%s,\n' % quote_python(self.getClass()))
        showIndent(outfile, level)
        outfile.write('name=%s,\n' % quote_python(self.getName()))
        showIndent(outfile, level)
        outfile.write('return_type=%s,\n' % quote_python(self.getReturn_type()))
        if self.args:
            showIndent(outfile, level)
            outfile.write('args=args(\n')
            self.args.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('impl_url=%s,\n' % quote_python(self.getImpl_url()))
        if self.ancillaries:
            showIndent(outfile, level)
            outfile.write('ancillaries=ancillaries(\n')
            self.ancillaries.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node, dispatching on child name.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'class':
                klass = ''
                for text_ in child.childNodes:
                    klass += text_.nodeValue
                self.klass = klass
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'name':
                name = ''
                for text_ in child.childNodes:
                    name += text_.nodeValue
                self.name = name
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'return-type':
                return_type = ''
                for text_ in child.childNodes:
                    return_type += text_.nodeValue
                self.return_type = return_type
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'args':
                obj = args.factory()
                obj.build(child)
                self.setArgs(obj)
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'impl-url':
                impl_url = ''
                for text_ in child.childNodes:
                    impl_url += text_.nodeValue
                self.impl_url = impl_url
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'ancillaries':
                obj = ancillaries.factory()
                obj.build(child)
                self.setAncillaries(obj)
# end class behavior
class args:
    """<args> container: an ordered list of arg elements.
    Generated by generateDS.py; kept byte-identical apart from docs."""
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, arg=None):
        if arg is None:
            self.arg = []
        else:
            self.arg = arg
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if args.subclass:
            return args.subclass(*args_, **kwargs_)
        else:
            return args(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getArg(self): return self.arg
    def addArg(self, value): self.arg.append(value)
    def setArg(self, index, value): self.arg[index] = value
    def export(self, outfile, level, name_='args'):
        # Serialize all child arg elements as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        for arg in self.arg:
            arg.export(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='args'):
        # Emit Python constructor-literal source for the child list.
        level += 1
        showIndent(outfile, level)
        outfile.write('arg=[\n')
        level += 1
        for arg in self.arg:
            showIndent(outfile, level)
            outfile.write('arg(\n')
            arg.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node, one child per <arg>.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'arg':
                obj = arg.factory()
                obj.build(child)
                self.arg.append(obj)
# end class args
class arg:
    """<arg> element: a single argument (name plus data type).
    Generated by generateDS.py; kept byte-identical apart from docs."""
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, name='', data_type=''):
        self.name = name
        self.data_type = data_type
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if arg.subclass:
            return arg.subclass(*args_, **kwargs_)
        else:
            return arg(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getName(self): return self.name
    def setName(self, name): self.name = name
    def getData_type(self): return self.data_type
    def setData_type(self, data_type): self.data_type = data_type
    def export(self, outfile, level, name_='arg'):
        # Serialize this element as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        showIndent(outfile, level)
        outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
        showIndent(outfile, level)
        outfile.write('<data-type>%s</data-type>\n' % quote_xml(self.getData_type()))
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='arg'):
        # Emit Python constructor-literal source for this element.
        level += 1
        showIndent(outfile, level)
        outfile.write('name=%s,\n' % quote_python(self.getName()))
        showIndent(outfile, level)
        outfile.write('data_type=%s,\n' % quote_python(self.getData_type()))
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'name':
                name = ''
                for text_ in child.childNodes:
                    name += text_.nodeValue
                self.name = name
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'data-type':
                data_type = ''
                for text_ in child.childNodes:
                    data_type += text_.nodeValue
                self.data_type = data_type
# end class arg
class ancillaries:
    """<ancillaries> container: an ordered list of ancillary elements.

    Bug fix: exportLiteral() wrote 'arg(' for each child, so the emitted
    Python literal would reconstruct arg objects instead of ancillary
    objects; it now writes 'ancillary(' (matching the sibling containers,
    which each name their own child constructor).
    """
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, ancillary=None):
        if ancillary is None:
            self.ancillary = []
        else:
            self.ancillary = ancillary
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if ancillaries.subclass:
            return ancillaries.subclass(*args_, **kwargs_)
        else:
            return ancillaries(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getAncillary(self): return self.ancillary
    def addAncillary(self, value): self.ancillary.append(value)
    def setAncillary(self, index, value): self.ancillary[index] = value
    def export(self, outfile, level, name_='ancillaries'):
        # Serialize all child ancillary elements as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        for ancillary in self.ancillary:
            ancillary.export(outfile, level, name_='ancillary')
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='ancillaries'):
        # Emit Python constructor-literal source for the child list.
        level += 1
        showIndent(outfile, level)
        outfile.write('ancillary=[\n')
        level += 1
        for ancillary in self.ancillary:
            showIndent(outfile, level)
            # Bug fix: was 'arg(\n' -- wrong constructor name.
            outfile.write('ancillary(\n')
            ancillary.exportLiteral(outfile, level, name_='ancillary')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node, one child per <ancillary>.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'ancillary':
                obj = ancillary.factory()
                obj.build(child)
                self.ancillary.append(obj)
# end class ancillaries
class ancillary:
    """<ancillary> element: a helper behavior with class, role, return
    type, name, argument list and implementation URL.

    Bug fix: export()/exportLiteral() called the nonexistent getKlass();
    the accessor generated for the 'class' attribute is getClass().
    """
    # Install a subclass here to have factory() produce subclass instances.
    subclass = None
    def __init__(self, klass='', role='', return_type='', name='', args=None, impl_url=''):
        self.klass = klass
        self.role = role
        self.return_type = return_type
        self.name = name
        self.args = args
        self.impl_url = impl_url
    def factory(*args_, **kwargs_):
        # Indirect constructor honoring the optional subclass hook.
        if ancillary.subclass:
            return ancillary.subclass(*args_, **kwargs_)
        else:
            return ancillary(*args_, **kwargs_)
    factory = staticmethod(factory)
    def getClass(self): return self.klass
    def setClass(self, klass): self.klass = klass
    def getRole(self): return self.role
    def setRole(self, role): self.role = role
    def getReturn_type(self): return self.return_type
    def setReturn_type(self, return_type): self.return_type = return_type
    def getName(self): return self.name
    def setName(self, name): self.name = name
    def getArgs(self): return self.args
    def setArgs(self, args): self.args = args
    def getImpl_url(self): return self.impl_url
    def setImpl_url(self, impl_url): self.impl_url = impl_url
    def export(self, outfile, level, name_='ancillary'):
        # Serialize this element (and children) as indented XML.
        showIndent(outfile, level)
        outfile.write('<%s>\n' % name_)
        level += 1
        showIndent(outfile, level)
        # Bug fix: was self.getKlass(), which raises AttributeError.
        outfile.write('<class>%s</class>\n' % quote_xml(self.getClass()))
        showIndent(outfile, level)
        outfile.write('<role>%s</role>\n' % quote_xml(self.getRole()))
        showIndent(outfile, level)
        outfile.write('<return-type>%s</return-type>\n' % quote_xml(self.getReturn_type()))
        showIndent(outfile, level)
        outfile.write('<name>%s</name>\n' % quote_xml(self.getName()))
        if self.args:
            self.args.export(outfile, level)
        showIndent(outfile, level)
        outfile.write('<impl-url>%s</impl-url>\n' % quote_xml(self.getImpl_url()))
        level -= 1
        showIndent(outfile, level)
        outfile.write('</%s>\n' % name_)
    def exportLiteral(self, outfile, level, name_='ancillary'):
        # Emit Python constructor-literal source for this element.
        level += 1
        showIndent(outfile, level)
        # Bug fix: was self.getKlass(), which raises AttributeError.
        outfile.write('klass=%s,\n' % quote_python(self.getClass()))
        showIndent(outfile, level)
        outfile.write('role=%s,\n' % quote_python(self.getRole()))
        showIndent(outfile, level)
        outfile.write('return_type=%s,\n' % quote_python(self.getReturn_type()))
        showIndent(outfile, level)
        outfile.write('name=%s,\n' % quote_python(self.getName()))
        if self.args:
            showIndent(outfile, level)
            outfile.write('args=args(\n')
            self.args.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('impl_url=%s,\n' % quote_python(self.getImpl_url()))
        level -= 1
    def build(self, node_):
        # Populate from a minidom element node, dispatching on child name.
        attrs = node_.attributes
        for child in node_.childNodes:
            nodeName_ = child.nodeName.split(':')[-1]
            if child.nodeType == Node.ELEMENT_NODE and \
               nodeName_ == 'class':
                klass = ''
                for text_ in child.childNodes:
                    klass += text_.nodeValue
                self.klass = klass
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'role':
                role = ''
                for text_ in child.childNodes:
                    role += text_.nodeValue
                self.role = role
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'return-type':
                return_type = ''
                for text_ in child.childNodes:
                    return_type += text_.nodeValue
                self.return_type = return_type
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'name':
                name = ''
                for text_ in child.childNodes:
                    name += text_.nodeValue
                self.name = name
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'args':
                obj = args.factory()
                obj.build(child)
                self.setArgs(obj)
            elif child.nodeType == Node.ELEMENT_NODE and \
                 nodeName_ == 'impl-url':
                impl_url = ''
                for text_ in child.childNodes:
                    impl_url += text_.nodeValue
                self.impl_url = impl_url
# end class ancillary
from xml.sax import handler, make_parser
class SaxStackElement:
    """One frame of the SAX parse stack: the element's name, the object
    being built (None for simple text elements), and the text content
    accumulated so far."""

    def __init__(self, name='', obj=None):
        self.name = name
        self.obj = obj
        self.content = ''
#
# SAX handler
#
class SaxXml_behaviorHandler(handler.ContentHandler):
def __init__(self):
self.stack = []
self.root = None
def getRoot(self):
return self.root
def setDocumentLocator(self, locator):
self.locator = locator
def showError(self, msg):
print '*** (showError):', msg
sys.exit(-1)
def startElement(self, name, attrs):
done = 0
if name == 'xml-behavior':
obj = xml-behavior.factory()
stackObj = SaxStackElement('xml-behavior', obj)
self.stack.append(stackObj)
done = 1
elif name == 'base-impl-url':
stackObj = SaxStackElement('base_impl_url', None)
self.stack.append(stackObj)
done = 1
elif name == 'behaviors':
obj = behaviors.factory()
stackObj = SaxStackElement('behaviors', obj)
self.stack.append(stackObj)
done = 1
elif name == 'behavior':
obj = behavior.factory()
stackObj = SaxStackElement('behavior', obj)
self.stack.append(stackObj)
done = 1
elif name == 'class':
stackObj = SaxStackElement('klass', None)
self.stack.append(stackObj)
done = 1
elif name == 'name':
stackObj = SaxStackElement('name', None)
self.stack.append(stackObj)
done = 1
elif name == 'return-type':
stackObj = SaxStackElement('return_type', None)
self.stack.append(stackObj)
done = 1
elif name == 'args':
obj = args.factory()
stackObj = SaxStackElement('args', obj)
self.stack.append(stackObj)
done = 1
elif name == 'impl-url':
stackObj = SaxStackElement('impl_url', None)
self.stack.append(stackObj)
done = 1
elif name == 'ancillaries':
obj = ancillaries.factory()
stackObj = SaxStackElement('ancillaries', obj)
self.stack.append(stackObj)
done = 1
elif name == 'arg':
obj = arg.factory()
stackObj = SaxStackElement('arg', obj)
self.stack.append(stackObj)
done = 1
elif name == 'data-type':
stackObj = SaxStackElement('data_type', None)
self.stack.append(stackObj)
done = 1
elif name == 'ancillary':
obj = arg.factory()
stackObj = SaxStackElement('ancillary', obj)
self.stack.append(stackObj)
done = 1
elif name == 'role':
stackObj = SaxStackElement('role', None)
self.stack.append(stackObj)
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
def endElement(self, name):
done = 0
if name == 'xml-behavior':
if len(self.stack) == 1:
self.root = self.stack[-1].obj
self.stack.pop()
done = 1
elif name == 'base-impl-url':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setBase_impl_url(content)
self.stack.pop()
done = 1
elif name == 'behaviors':
if len(self.stack) >= 2:
self.stack[-2].obj.setBehaviors(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'behavior':
if len(self.stack) >= 2:
self.stack[-2].obj.addBehavior(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'class':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setClass(content)
self.stack.pop()
done = 1
elif name == 'name':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setName(content)
self.stack.pop()
done = 1
elif name == 'return-type':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setReturn_type(content)
self.stack.pop()
done = 1
elif name == 'args':
if len(self.stack) >= 2:
self.stack[-2].obj.setArgs(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'impl-url':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setImpl_url(content)
self.stack.pop()
done = 1
elif name == 'ancillaries':
if len(self.stack) >= 2:
self.stack[-2].obj.setAncillaries(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'arg':
if len(self.stack) >= 2:
self.stack[-2].obj.addArg(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'data-type':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setData_type(content)
self.stack.pop()
done = 1
elif name == 'ancillary':
if len(self.stack) >= 2:
self.stack[-2].obj.addAncillary(self.stack[-1].obj)
self.stack.pop()
done = 1
elif name == 'role':
if len(self.stack) >= 2:
content = self.stack[-1].content
self.stack[-2].obj.setRole(content)
self.stack.pop()
done = 1
if not done:
self.reportError('"%s" element not allowed here.' % name)
def characters(self, chrs, start, end):
if len(self.stack) > 0:
self.stack[-1].content += chrs[start:end]
def reportError(self, mesg):
locator = self.locator
sys.stderr.write('Doc: %s Line: %d Column: %d\n' % \
(locator.getSystemId(), locator.getLineNumber(),
locator.getColumnNumber() + 1))
sys.stderr.write(mesg)
sys.stderr.write('\n')
sys.exit(-1)
#raise RuntimeError
# Command-line usage text for the demo driver in main() below.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
Options:
-s Use the SAX parser, not the minidom parser.
"""

def usage():
    # Print usage and abort (Python 2 print statement; module targets py2).
    print USAGE_TEXT
    sys.exit(-1)
def saxParse(inFileName):
    """SAX-parse *inFileName*, echo the reconstructed document to stdout
    as XML, and return the root xml_behavior object."""
    parser = make_parser()
    documentHandler = SaxXml_behaviorHandler()
    # Bug fix: xml.sax readers register handlers with setContentHandler();
    # setDocumentHandler() does not exist on xml.sax.xmlreader.XMLReader
    # and raised AttributeError.
    parser.setContentHandler(documentHandler)
    parser.parse('file:%s' % inFileName)
    root = documentHandler.getRoot()
    sys.stdout.write('<?xml version="1.0" ?>\n')
    root.export(sys.stdout, 0)
    return root
def saxParseString(inString):
    """SAX-parse the XML document in *inString* incrementally and return
    the root xml_behavior object (no echo to stdout)."""
    parser = make_parser()
    documentHandler = SaxXml_behaviorHandler()
    # Bug fix: xml.sax readers register handlers with setContentHandler();
    # setDocumentHandler() does not exist and raised AttributeError.
    parser.setContentHandler(documentHandler)
    parser.feed(inString)
    parser.close()
    rootObj = documentHandler.getRoot()
    #sys.stdout.write('<?xml version="1.0" ?>\n')
    #rootObj.export(sys.stdout, 0)
    return rootObj
def parse(inFileName):
    """Minidom-parse *inFileName*, echo it to stdout as XML, and return
    the root xml_behavior object."""
    doc = minidom.parse(inFileName)
    rootNode = doc.childNodes[0]
    rootObj = xml_behavior.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0)
    return rootObj
def parseString(inString):
    """Minidom-parse the XML document in *inString*, echo it to stdout
    as XML, and return the root xml_behavior object."""
    doc = minidom.parseString(inString)
    rootNode = doc.childNodes[0]
    rootObj = xml_behavior.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0)
    return rootObj
def parseLiteral(inFileName):
    """Minidom-parse *inFileName* and write equivalent Python literal
    (constructor-expression) source to stdout; return the root object."""
    doc = minidom.parse(inFileName)
    rootNode = doc.childNodes[0]
    rootObj = xml_behavior.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('from xmlbehavior import *\n\n')
    sys.stdout.write('rootObj = xml_behavior(\n')
    rootObj.exportLiteral(sys.stdout, 0)
    sys.stdout.write(')\n')
    return rootObj
def main():
    # Dispatch on command-line arguments:
    #   "-s FILE" -> SAX parse and echo as XML
    #   "FILE"    -> minidom parse and emit a Python literal
    #   anything else -> print usage and exit.
    args = sys.argv[1:]
    if len(args) == 2 and args[0] == '-s':
        saxParse(args[1])
    elif len(args) == 1:
        parseLiteral(args[0])
    else:
        usage()

if __name__ == '__main__':
    main()
    #import pdb
    #pdb.run('main()')
| gpl-2.0 |
capstone-coal/pycoal | docs/source/conf.py | 2 | 11124 | # Copyright (C) 2017-2019 COAL Developers
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
# COAL documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 3 19:45:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath('../../pycoal'))
import guzzle_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pycoal'
copyright = u'Copyright (C) 2017-2019 COAL Developers'
author = u'COAL Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'guzzle_sphinx_theme'
# Register the theme as an extension to generate a sitemap.xml
extensions.append("guzzle_sphinx_theme")
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# Set the name of the project to appear in the sidebar
"project_nav_name": "Pycoal",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = guzzle_sphinx_theme.html_theme_path()
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = u'Pycoal: A python suite for the identification and characterization of mining activity within AVIRIS data.'
# A shorter title for the navigation bar. Default is the same as html_title.
#
#html_short_title = COAL
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
html_sidebars = {
'**': [
# 'about.html',
# 'navigation.html',
# 'relations.html',
'searchbox.html',
# 'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'COALdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'COAL.tex', u'COAL Documentation',
u'COAL Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coal', u'COAL Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'COAL', u'COAL Documentation',
author, 'COAL Developers', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| gpl-2.0 |
xerosanyam/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_groups_participants_add_success.py | 61 | 1785 | from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessAddParticipantsIqProtocolEntity(ResultIqProtocolEntity):
    '''
    Result iq confirming participants that were successfully added to a group:

    <iq type="result" from="{{group_jid}}" id="{{id}}">
        <add type="success" participant="{{jid}}"></add>
        <add type="success" participant="{{jid}}"></add>
    </iq>
    '''
    def __init__(self, _id, groupId, participantList):
        super(SuccessAddParticipantsIqProtocolEntity, self).__init__(_from = groupId, _id = _id)
        self.setProps(groupId, participantList)

    def setProps(self, groupId, participantList):
        # Keep the group jid, the confirmed jids and the action tag name.
        self.groupId = groupId
        self.participantList = participantList
        self.action = 'add'

    def getAction(self):
        return self.action

    def toProtocolTreeNode(self):
        # Serialize the base result iq, then append one <add> child per jid.
        node = super(SuccessAddParticipantsIqProtocolEntity, self).toProtocolTreeNode()
        children = []
        for jid in self.participantList:
            children.append(ProtocolTreeNode("add", {
                "type": "success",
                "participant": jid
            }))
        node.addChildren(children)
        return node

    @staticmethod
    def fromProtocolTreeNode(node):
        # Upgrade the generic result entity produced by the base class,
        # then collect every successfully added participant jid.
        entity = super(SuccessAddParticipantsIqProtocolEntity, SuccessAddParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
        entity.__class__ = SuccessAddParticipantsIqProtocolEntity
        jids = [child["participant"]
                for child in node.getAllChildren()
                if child["type"] == "success"]
        entity.setProps(node.getAttributeValue("from"), jids)
        return entity
| gpl-3.0 |
llooker/python_sdk | test/test_look.py | 1 | 2288 | # coding: utf-8
"""
Looker API 3.0 Reference
### Authorization The Looker API uses Looker **API3** credentials for authorization and access control. Looker admins can create API3 credentials on Looker's **Admin/Users** page. Pass API3 credentials to the **/login** endpoint to obtain a temporary access_token. Include that access_token in the Authorization header of Looker API requests. For details, see [Looker API Authorization](https://looker.com/docs/r/api/authorization) ### Client SDKs The Looker API is a RESTful system that should be usable by any programming language capable of making HTTPS requests. Client SDKs for a variety of programming languages can be generated from the Looker API's Swagger JSON metadata to streamline use of the Looker API in your applications. A client SDK for Ruby is available as an example. For more information, see [Looker API Client SDKs](https://looker.com/docs/r/api/client_sdks) ### Try It Out! The 'api-docs' page served by the Looker instance includes 'Try It Out!' buttons for each API method. After logging in with API3 credentials, you can use the \"Try It Out!\" buttons to call the API directly from the documentation page to interactively explore API features and responses. ### Versioning Future releases of Looker will expand this API release-by-release to securely expose more and more of the core power of Looker to API client applications. API endpoints marked as \"beta\" may receive breaking changes without warning. Stable (non-beta) API endpoints should not receive breaking changes in future releases. For more information, see [Looker API Versioning](https://looker.com/docs/r/api/versioning)
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.look import Look
class TestLook(unittest.TestCase):
    """ Look unit test stubs """
    def setUp(self):
        # Generated stub: no fixtures required.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testLook(self):
        """
        Test Look
        """
        # Smoke test: constructing the generated Look model must not raise.
        model = swagger_client.models.look.Look()
if __name__ == '__main__':
unittest.main()
| mit |
JaDogg/__py_playground | reference/ply-3.8/test/lex_hedit.py | 174 | 1141 | # -----------------------------------------------------------------------------
# hedit.py
#
# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
#
# These tokens can't be easily tokenized because they are of the following
# form:
#
# nHc1...cn
#
# where n is a positive integer and c1 ... cn are characters.
#
# This example shows how to modify the state of the lexer to parse
# such tokens
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = (
'H_EDIT_DESCRIPTOR',
)
# Tokens
t_ignore = " \t\n"
def t_H_EDIT_DESCRIPTOR(t):
    r"\d+H.*" # This grabs all of the remaining text
    # NOTE: the raw string above is the PLY token regex (PLY reads it from
    # the function docstring), so it must stay the first statement.
    i = t.value.index('H')
    # The regex guarantees the prefix is decimal digits, so int() is the
    # correct parser; eval() was unnecessary and unsafe on scanner input.
    n = int(t.value[:i])
    # Rewind the lexer so tokenizing resumes right after the n characters
    # that belong to this H descriptor.
    t.lexer.lexpos -= len(t.value) - (i+1+n)
    # Keep only the n payload characters following the 'H'.
    t.value = t.value[i+1:i+1+n]
    return t
def t_error(t):
    """Report an unrecognized character and skip past it."""
    offending = t.value[0]
    print("Illegal character '%s'" % offending)
    t.lexer.skip(1)
# Build the lexer
lex.lex()
lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
| mit |
georgeyk/prettyconf | tests/test_envfile.py | 1 | 1999 | # coding: utf-8
import os
from .base import BaseTestCase
from prettyconf.loaders import EnvFileConfigurationLoader
class EnvFileTestCase(BaseTestCase):
    """Tests for ``EnvFileConfigurationLoader`` parsing of env-style files."""
    def setUp(self):
        # Resolve the shared "envfile" fixture under the test files directory.
        super(EnvFileTestCase, self).setUp()
        self.envfile = os.path.join(self.test_files_path, "envfile")
    def test_config_file_parsing(self):
        # Each assertion pins one quoting/comment/interpolation rule of the
        # env-file format against the fixture's raw lines.
        config = EnvFileConfigurationLoader(self.envfile)
        self.assertEqual(config["KEY"], "Value")
        self.assertEqual(config["KEY_EMPTY"], "")
        self.assertEqual(config["KEY_EMPTY_WITH_COMMENTS"], "")
        self.assertEqual(config["INLINE_COMMENTS"], "Foo")
        self.assertEqual(config["HASH_CONTENT"], "Foo Bar # Baz %(key)s")
        self.assertEqual(config["PERCENT_NOT_ESCAPED"], "%%")
        self.assertEqual(config["NO_INTERPOLATION"], "%(KeyOff)s")
        self.assertEqual(config["IGNORE_SPACE"], "text")
        self.assertEqual(config["SINGLE_QUOTE_SPACE"], " text")
        self.assertEqual(config["DOUBLE_QUOTE_SPACE"], " text")
        self.assertEqual(config["UPDATED"], "text")
        self.assertEqual(config["CACHE_URL_QUOTES"], "cache+memcached://foo:bar@localhost:11211/?n=1&x=2,5")
        self.assertEqual(config["CACHE_URL"], "cache+memcached://foo:bar@localhost:11211/?n=1&x=2,5")
        self.assertEqual(config["DOUBLE_QUOTE_INSIDE_QUOTE"], 'foo "bar" baz')
        self.assertEqual(config["SINGLE_QUOTE_INSIDE_QUOTE"], "foo 'bar' baz")
    def test_missing_invalid_keys_in_config_file_parsing(self):
        # Commented-out or syntactically invalid lines must not produce keys.
        config = EnvFileConfigurationLoader(self.envfile)
        self.assertNotIn("COMMENTED_KEY", config)
        self.assertNotIn("INVALID_KEY", config)
        self.assertNotIn("OTHER_INVALID_KEY", config)
    def test_list_config_filenames(self):
        # get_filenames should discover exactly the ".env" file just created.
        self._create_file(self.test_files_path + "/.env")
        filenames = EnvFileConfigurationLoader.get_filenames(self.test_files_path)
        self.assertEqual(len(filenames), 1)
        self.assertEqual(self.test_files_path + "/.env", filenames[0])
| mit |
wood-galaxy/FreeCAD | src/Mod/Arch/ArchEquipment.py | 1 | 13791 | # -*- coding: utf8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2014 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD Equipment"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import FreeCAD,Draft,ArchComponent,DraftVecUtils,ArchCommands,Units
from FreeCAD import Vector
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from DraftTools import translate
from PySide.QtCore import QT_TRANSLATE_NOOP
else:
def translate(ctxt,txt):
return txt
def QT_TRANSLATE_NOOP(ctxt,txt):
return txt
# presets
Roles = ["Furniture", "Hydro Equipment", "Electric Equipment"]
def makeEquipment(baseobj=None,placement=None,name="Equipment",type=None):
    "makeEquipment([baseobj,placement,name,type]): creates an equipment object from the given base object"
    # An explicit type ("Part" or anything else -> Mesh) overrides the
    # container kind; otherwise it is inferred from the base object.
    if type:
        if type == "Part":
            obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
        else:
            obj = FreeCAD.ActiveDocument.addObject("Mesh::FeaturePython",name)
        _Equipment(obj)
        if baseobj:
            obj.Base = baseobj
    else:
        if baseobj:
            # Mesh-based base objects get a mesh container, otherwise a Part one.
            if baseobj.isDerivedFrom("Mesh::Feature"):
                obj = FreeCAD.ActiveDocument.addObject("Mesh::FeaturePython",name)
            else:
                obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
            _Equipment(obj)
            obj.Base = baseobj
        else:
            # No base: create an empty Part-based equipment.
            obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
            _Equipment(obj)
    obj.Label = translate("Arch",name)
    if placement:
        obj.Placement = placement
    if FreeCAD.GuiUp:
        # Attach the view provider and hide the consumed base object.
        _ViewProviderEquipment(obj.ViewObject)
        if baseobj:
            baseobj.ViewObject.hide()
    return obj
def createMeshView(obj,direction=FreeCAD.Vector(0,0,-1),outeronly=False,largestonly=False):
    """createMeshView(obj,[direction,outeronly,largestonly]): creates a flat shape that is the
    projection of the given mesh object in the given direction (default = on the XY plane). If
    outeronly is True, only the outer contour is taken into consideration, discarding the inner
    holes. If largestonly is True, only the largest segment of the given mesh will be used."""
    import Mesh, math, Part, DraftGeomUtils
    if not obj.isDerivedFrom("Mesh::Feature"):
        return
    mesh = obj.Mesh
    # 1. Flattening the mesh: project every facet vertex onto the plane
    # orthogonal to the view direction.
    proj = []
    for f in mesh.Facets:
        nf = []
        for v in f.Points:
            v = FreeCAD.Vector(v)
            a = v.negative().getAngle(direction)
            l = math.cos(a)*v.Length
            p = v.add(FreeCAD.Vector(direction).multiply(l))
            p = DraftVecUtils.rounded(p)
            nf.append(p)
        proj.append(nf)
    flatmesh = Mesh.Mesh(proj)
    # 2. Removing wrong faces: keep only facets facing the viewer.
    facets = []
    for f in flatmesh.Facets:
        if f.Normal.getAngle(direction) < math.pi:
            facets.append(f)
    cleanmesh = Mesh.Mesh(facets)
    #Mesh.show(cleanmesh)
    # 3. Getting the bigger mesh from the planar segments
    if largestonly:
        c = cleanmesh.getSeparateComponents()
        #print c
        cleanmesh = c[0]
        segs = cleanmesh.getPlanarSegments(1)
        meshes = []
        for s in segs:
            f = [cleanmesh.Facets[i] for i in s]
            meshes.append(Mesh.Mesh(f))
        a = 0
        for m in meshes:
            if m.Area > a:
                boundarymesh = m
                a = m.Area
        #Mesh.show(boundarymesh)
        cleanmesh = boundarymesh
    # 4. Creating a Part and getting the contour: fuse one face per facet.
    shape = None
    for f in cleanmesh.Facets:
        p = Part.makePolygon(f.Points+[f.Points[0]])
        #print p,len(p.Vertexes),p.isClosed()
        try:
            p = Part.Face(p)
            if shape:
                shape = shape.fuse(p)
            else:
                shape = p
        except Part.OCCError:
            # Degenerate facets can fail to produce a face; skip them.
            pass
    shape = shape.removeSplitter()
    # 5. Extracting the largest wire (outer contour) when requested.
    if outeronly:
        count = 0
        largest = None
        for w in shape.Wires:
            if len(w.Vertexes) > count:
                count = len(w.Vertexes)
                largest = w
        if largest:
            try:
                # BUGFIX: build the face from the largest wire, not from the
                # loop variable `w` (which is merely the last wire visited).
                f = Part.Face(largest)
            except Part.OCCError:
                print("Unable to produce a face from the outer wire.")
            else:
                shape = f
    return shape
class _CommandEquipment:
    "the Arch Equipment command definition"
    def GetResources(self):
        # Icon, menu label, shortcut and tooltip shown in the FreeCAD GUI.
        return {'Pixmap'  : 'Arch_Equipment',
                'MenuText': QT_TRANSLATE_NOOP("Arch_Equipment","Equipment"),
                'Accel': "E, Q",
                'ToolTip': QT_TRANSLATE_NOOP("Arch_Equipment","Creates an equipment object from a selected object (Part or Mesh)")}
    def IsActive(self):
        # Enabled whenever a document is open.
        return not FreeCAD.ActiveDocument is None
    def Activated(self):
        # Requires a selected base object; the actual creation is routed
        # through the Python console so it appears in the undo history.
        s = FreeCADGui.Selection.getSelection()
        if not s:
            FreeCAD.Console.PrintError(translate("Arch","You must select a base object first!"))
        else:
            base = s[0].Name
            FreeCAD.ActiveDocument.openTransaction(str(translate("Arch","Create Equipment")))
            FreeCADGui.addModule("Arch")
            FreeCADGui.doCommand("Arch.makeEquipment(FreeCAD.ActiveDocument." + base + ")")
            FreeCAD.ActiveDocument.commitTransaction()
            FreeCAD.ActiveDocument.recompute()
            # get diffuse color info from base object
            if hasattr(s[0].ViewObject,"DiffuseColor"):
                FreeCADGui.doCommand("FreeCAD.ActiveDocument.Objects[-1].ViewObject.DiffuseColor = FreeCAD.ActiveDocument." + base + ".ViewObject.DiffuseColor")
        return
class _Command3Views:
    "the Arch 3Views command definition"
    def GetResources(self):
        # Icon, menu label and tooltip shown in the FreeCAD GUI.
        return {'Pixmap'  : 'Arch_3Views',
                'MenuText': QT_TRANSLATE_NOOP("Arch_3Views","3 views from mesh"),
                'ToolTip': QT_TRANSLATE_NOOP("Arch_3Views","Creates 3 views (top, front, side) from a mesh-based object")}
    def IsActive(self):
        # Enabled whenever a document is open.
        return not FreeCAD.ActiveDocument is None
    def Activated(self):
        # Needs exactly one selected mesh; warns (or asks confirmation) for
        # large meshes since the projection can be slow.
        s = FreeCADGui.Selection.getSelection()
        if len(s) != 1:
            FreeCAD.Console.PrintError(translate("Arch","You must select exactly one base object"))
        else:
            obj = s[0]
            if not obj.isDerivedFrom("Mesh::Feature"):
                FreeCAD.Console.PrintError(translate("Arch","The selected object must be a mesh"))
            else:
                if obj.Mesh.CountFacets > 1000:
                    # Over 1000 facets: require explicit user confirmation.
                    msgBox = QtGui.QMessageBox()
                    msgBox.setText(translate("Arch","This mesh has more than 1000 facets."))
                    msgBox.setInformativeText(translate("Arch","This operation can take a long time. Proceed?"))
                    msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)
                    msgBox.setDefaultButton(QtGui.QMessageBox.Cancel)
                    ret = msgBox.exec_()
                    if ret == QtGui.QMessageBox.Cancel:
                        return
                elif obj.Mesh.CountFacets >= 500:
                    FreeCAD.Console.PrintWarning(translate("Arch","The mesh has more than 500 facets. This will take a couple of minutes..."))
                # Create the three projections (top, front, side) through the
                # console so the operation is undoable.
                FreeCAD.ActiveDocument.openTransaction(str(translate("Arch","Create 3 views")))
                FreeCADGui.addModule("Arch")
                FreeCADGui.addModule("Part")
                FreeCADGui.doCommand("s1 = Arch.createMeshView(FreeCAD.ActiveDocument." + obj.Name + ",FreeCAD.Vector(0,0,-1),outeronly=False,largestonly=False)")
                FreeCADGui.doCommand("Part.show(s1)")
                FreeCADGui.doCommand("s2 = Arch.createMeshView(FreeCAD.ActiveDocument." + obj.Name + ",FreeCAD.Vector(1,0,0),outeronly=False,largestonly=False)")
                FreeCADGui.doCommand("Part.show(s2)")
                FreeCADGui.doCommand("s3 = Arch.createMeshView(FreeCAD.ActiveDocument." + obj.Name + ",FreeCAD.Vector(0,1,0),outeronly=False,largestonly=False)")
                FreeCADGui.doCommand("Part.show(s3)")
                FreeCAD.ActiveDocument.commitTransaction()
                FreeCAD.ActiveDocument.recompute()
        return
class _Equipment(ArchComponent.Component):
    "The Equipment object"
    def __init__(self,obj):
        ArchComponent.Component.__init__(self,obj)
        obj.addProperty("App::PropertyString","Model","Arch",QT_TRANSLATE_NOOP("App::Property","The model description of this equipment"))
        obj.addProperty("App::PropertyString","Url","Arch",QT_TRANSLATE_NOOP("App::Property","The url of the product page of this equipment"))
        obj.addProperty("App::PropertyVectorList","SnapPoints","Arch",QT_TRANSLATE_NOOP("App::Property","Additional snap points for this equipment"))
        obj.addProperty("App::PropertyFloat","EquipmentPower","Arch",QT_TRANSLATE_NOOP("App::Property","The electric power needed by this equipment in Watts"))
        self.Type = "Equipment"
        obj.Role = Roles
        obj.Proxy = self
        # Area/perimeter properties are meaningless for equipment: hide them.
        obj.setEditorMode("VerticalArea",2)
        obj.setEditorMode("HorizontalArea",2)
        obj.setEditorMode("PerimeterLength",2)
    def onChanged(self,obj,prop):
        self.hideSubobjects(obj,prop)
    def execute(self,obj):
        """Rebuild the equipment's mesh or shape from its Base object."""
        if self.clone(obj):
            return
        pl = obj.Placement
        if obj.Base:
            if obj.isDerivedFrom("Mesh::Feature"):
                # Mesh container: tessellate a Part base, or copy a mesh base.
                m = None
                if obj.Base.isDerivedFrom("Part::Feature"):
                    base = obj.Base.Shape.copy()
                    base = self.processSubShapes(obj,base,pl)
                    if base:
                        import Mesh
                        m = Mesh.Mesh(base.tessellate(1))
                elif obj.Base.isDerivedFrom("Mesh::Feature"):
                    m = obj.Base.Mesh.copy()
                if m:
                    if not pl.isNull():
                        m.Placement = pl
                    obj.Mesh = m
            else:
                # Part container: copy a Part base, or reconstruct a shape
                # from a mesh base.
                base = None
                if obj.Base.isDerivedFrom("Part::Feature"):
                    base = obj.Base.Shape.copy()
                elif obj.Base.isDerivedFrom("Mesh::Feature"):
                    import Part
                    base = Part.Shape()
                    base.makeShapeFromMesh(obj.Base.Mesh.Topology,0.05)
                    # BUGFIX: the method is removeSplitter(); the previous
                    # "removeSplitteR" raised AttributeError on this path.
                    base = base.removeSplitter()
                if base:
                    base = self.processSubShapes(obj,base,pl)
                    self.applyShape(obj,base,pl,allowinvalid=False,allownosolid=True)
    def computeAreas(self,obj):
        # Equipment has no floor/wall areas to compute.
        return
class _ViewProviderEquipment(ArchComponent.ViewProviderComponent):
    "A View Provider for the Equipment object"
    def __init__(self,vobj):
        ArchComponent.ViewProviderComponent.__init__(self,vobj)
    def getIcon(self):
        # Clones get a distinct icon so they are recognizable in the tree.
        import Arch_rc
        if hasattr(self,"Object"):
            if hasattr(self.Object,"CloneOf"):
                if self.Object.CloneOf:
                    return ":/icons/Arch_Equipment_Clone.svg"
        return ":/icons/Arch_Equipment_Tree.svg"
    def attach(self, vobj):
        # Build a coin scenegraph branch that displays one filled-circle
        # marker per snap point; coordinates are filled in updateData().
        from pivy import coin
        sep = coin.SoSeparator()
        self.coords = coin.SoCoordinate3()
        sep.addChild(self.coords)
        self.coords.point.deleteValues(0)
        symbol = coin.SoMarkerSet()
        symbol.markerIndex = coin.SoMarkerSet.CIRCLE_FILLED_5_5
        sep.addChild(symbol)
        rn = vobj.RootNode
        rn.addChild(sep)
        ArchComponent.ViewProviderComponent.attach(self,vobj)
    def updateData(self, obj, prop):
        # Mirror the SnapPoints property into the coin coordinate node.
        if prop == "SnapPoints":
            if obj.SnapPoints:
                self.coords.point.setNum(len(obj.SnapPoints))
                self.coords.point.setValues([[p.x,p.y,p.z] for p in obj.SnapPoints])
            else:
                self.coords.point.deleteValues(0)
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Arch_Equipment',_CommandEquipment())
FreeCADGui.addCommand('Arch_3Views', _Command3Views())
| lgpl-2.1 |
tammyyang/CudaPy | cudapy/compiler.py | 2 | 3440 | # CudaPy module
from ctypes import *
import hashlib, subprocess, tempfile, os.path
from pkg_resources import resource_filename
from cudatypes import *
from template import Template, parseSig
from wrapper import wrapper
# Load the py2cuda library
py2cudaLib = cdll.LoadLibrary(resource_filename(__name__, 'py2cuda.so'))
py2cudaExtern = getattr(py2cudaLib, "py2cuda")
py2cudaExtern.argtypes = [c_char_p, c_char_p]
py2cudaExtern.restype = c_char_p
class CudaPyError(Exception):
    """Error raised when CudaPy translation or compilation fails."""
    def __init__(self, value):
        # Keep the offending value so callers can inspect it directly.
        self.value = value
    def __str__(self):
        # Show the repr so non-string payloads display unambiguously.
        return "%r" % (self.value,)
def kernel(sig = None, debug = False):
    """Decorator factory: compile the decorated function as a CUDA kernel.

    ``sig`` is an optional type signature; ``debug`` dumps the generated
    CUDA source to disk.
    """
    def decorate(fun):
        return compile(fun, sig, debug)
    return decorate
def compile(funs, sigs = None, debug = False):
    """Translate the given Python function(s) to CUDA, build them with nvcc,
    and return a callable wrapper for the first kernel.

    NOTE: intentionally shadows the builtin ``compile`` within this module.
    """
    if not isinstance(funs, list):
        # Normalise the single-function form to the list form.
        return compile([funs], [sigs], debug)
    if len(funs) == 0:
        return None
    if sigs is None:
        sigs = [None] * len(funs)
    (pySources, sigs) = zip(*map(getSource, funs, sigs))
    pySource = "\n\n".join(pySources)
    # Only Void-returning functions are exported as kernel entry points.
    (funExt, sigExt) = zip(*[(fun, sig) for (fun, sig) in zip(funs, sigs) if sig[0] is Void])
    funNames = [fun.__name__ for fun in funExt]
    # Name used for the debug dump file, if requested.
    debugOut = funNames[0] if len(funNames) > 0 else 'module'
    cudaSource = py2cuda(pySource, sigs, output = debugOut + ".cu" if debug else None)
    # Entry points are emitted with a "__call" prefix by the translator.
    cudaCalls = compileCuda(cudaSource, sigExt, ["__call" + f for f in funNames])
    return wrapper(cudaCalls[0], sigExt[0], funNames[0])
# Returns the source code and type signature of the given function object
def getSource(fun, sig = None):
    """Return ``(source, signature)`` for *fun*.

    *sig* may be a pre-parsed signature list, a signature string (parsed via
    ``parseSig``), or None, in which case the Template's own signature is used.
    Raises CudaPyError when no valid signature can be determined.
    """
    if not isinstance(fun, Template):
        fun = Template(fun)
    if isinstance(sig, list):
        # Already parsed: use as-is.
        pass
    elif isinstance(sig, basestring):
        # Python 2: string signatures are parsed on the fly.
        sig = parseSig(fun.__name__ + " : " + sig, fun.__name__)
    else:
        sig = fun._signature
    if sig is None:
        raise CudaPyError("function does not have a valid signature: " + fun.__name__)
    return (fun._source, sig)
def py2cuda(source, sigs, output = None):
    """Translate Python *source* with type signatures *sigs* into CUDA C.

    Calls the py2cuda shared library; raises CudaPyError if the translator
    reports an error.  If *output* is given, the generated CUDA source is
    also written to that file.
    """
    # Encode signatures as newline-separated lists of Haskell type names.
    hstypes = [[t._hstype for t in sig] for sig in sigs]
    sigEnc = '\n'.join([' '.join(sig) for sig in hstypes])
    cudaSource = py2cudaExtern(source, sigEnc)
    # Check for errors during translation
    # The library's reply is "<status>:<payload>"; status "error" carries
    # an error message instead of CUDA source.
    [code, cudaSource] = cudaSource.split(':', 1)
    if code == "error":
        raise CudaPyError(cudaSource)
    if output is not None:
        with open(output, "w") as f:
            f.write(cudaSource)
    return cudaSource
def compileCuda(source, sigs, funNames):
    """Compile CUDA *source* with nvcc into a shared library (cached on disk
    by content hash) and return ctypes bindings for *funNames*.

    Each returned function takes grid and block dim3 arguments followed by
    the kernel's own arguments, per the matching signature in *sigs*.
    """
    # The library file name doubles as a compilation cache key.
    libFile = hash(source) + ".so"
    if not os.path.isfile(libFile):
        flags = ["-O3"]
        shared = ["--shared", "--compiler-options", "-fPIC", "-x", "cu"]
        # Silence nvcc warnings triggered by generated code.
        warnings = [ "-Xcudafe"
                   , "--diag_suppress=declared_but_not_referenced"
                   , "-Xcudafe"
                   , "--diag_suppress=set_but_not_used"
                   ]
        tmpFile = tempfile.NamedTemporaryFile(suffix = '.cu')
        tmpFile.write(source)
        tmpFile.seek(0)
        try:
            files = ["-o", libFile, tmpFile.name]
            subprocess.check_output(["nvcc"] + flags + shared + warnings + files)
        except subprocess.CalledProcessError as e:
            print e.output
            raise CudaPyError("nvcc exited with error code " + str(e.returncode))
        finally:
            tmpFile.close()
    funs = []
    for (sig, funName) in zip(sigs, funNames):
        # Bind each exported kernel: restype from the signature head,
        # argtypes are grid/block dims plus the declared parameter types.
        fun = getattr(cdll.LoadLibrary(libFile), funName)
        fun.restype = sig[0]._ctype
        fun.argtypes = [dim3, dim3] + [t._ctype for t in sig[1:]]
        funs.append(fun)
    return funs
def hash(str):
    """Return the first 32 hex characters of the SHA-224 digest of *str*.

    NOTE: shadows the builtin ``hash``; the name is kept for compatibility
    with existing callers in this module.
    """
    digest = hashlib.sha224(str)
    return digest.hexdigest()[:32]
| mit |
AutorestCI/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/status_codes_based_trigger.py | 4 | 1507 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StatusCodesBasedTrigger(Model):
"""Trigger based on status code.
:param status: HTTP status code.
:type status: int
:param sub_status: SubStatus.
:type sub_status: int
:param win32_status: Win32 error code.
:type win32_status: int
:param count: Count.
:type count: int
:param time_interval: Time interval.
:type time_interval: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'sub_status': {'key': 'subStatus', 'type': 'int'},
'win32_status': {'key': 'win32Status', 'type': 'int'},
'count': {'key': 'count', 'type': 'int'},
'time_interval': {'key': 'timeInterval', 'type': 'str'},
}
def __init__(self, status=None, sub_status=None, win32_status=None, count=None, time_interval=None):
self.status = status
self.sub_status = sub_status
self.win32_status = win32_status
self.count = count
self.time_interval = time_interval
| mit |
mjfarmer/scada_py | env/lib/python2.7/site-packages/twisted/spread/ui/tkutil.py | 11 | 12919 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Utilities for building L{PB<twisted.spread.pb>} clients with L{Tkinter}.
"""
from Tkinter import (
ACTIVE, Button, Canvas, E, END, Entry, Frame, Label, LEFT, Listbox,
mainloop, N, S, StringVar, Toplevel, Tk, W)
from tkSimpleDialog import _QueryString
from tkFileDialog import _Dialog
from twisted.spread import pb
from twisted import copyright
import string
#normalFont = Font("-adobe-courier-medium-r-normal-*-*-120-*-*-m-*-iso8859-1")
#boldFont = Font("-adobe-courier-bold-r-normal-*-*-120-*-*-m-*-iso8859-1")
#errorFont = Font("-adobe-courier-medium-o-normal-*-*-120-*-*-m-*-iso8859-1")
class _QueryPassword(_QueryString):
    """A string prompt dialog whose entry masks typed characters with '*'."""

    def body(self, master):
        # Prompt label on top, masked entry underneath.
        prompt_label = Label(master, text=self.prompt, justify=LEFT)
        prompt_label.grid(row=0, padx=5, sticky=W)
        self.entry = Entry(master, name="entry", show="*")
        self.entry.grid(row=1, padx=5, sticky=W + E)
        # Pre-fill and pre-select any initial value so typing replaces it.
        if self.initialvalue:
            self.entry.insert(0, self.initialvalue)
            self.entry.select_range(0, END)
        # Returned widget receives initial focus.
        return self.entry
def askpassword(title, prompt, **kw):
    '''get a password from the user

    @param title: the dialog title
    @param prompt: the label text
    @param **kw: see L{SimpleDialog} class

    @returns: a string
    '''
    # Direct keyword expansion replaces the deprecated apply() builtin.
    # The dialog blocks until dismissed; its result attribute holds the
    # entered string (or None on cancel).
    d = _QueryPassword(title, prompt, **kw)
    return d.result
def grid_setexpand(widget):
    """Give every column and row of widget's grid a weight of 1.

    With nonzero weights, all cells stretch when the widget is resized.
    """
    column_count, row_count = widget.grid_size()
    for column in range(column_count):
        widget.columnconfigure(column, weight=1)
    for row in range(row_count):
        widget.rowconfigure(row, weight=1)
class CList(Frame):
    """Multi-column list widget built from parallel Listboxes.

    One Listbox per column with a clickable header Button above each;
    clicking a header sorts the rows by that column (clicking the same
    header again reverses the direction).  Selection, scrolling and
    keyboard navigation are kept in lockstep across all columns.
    """
    def __init__(self,parent,labels,disablesorting=0,**kw):
        Frame.__init__(self,parent)
        self.labels=labels
        self.lists=[]
        self.disablesorting=disablesorting
        # Without this, selecting in one Listbox would steal the X
        # selection from (and clear the highlight in) the others.
        kw["exportselection"]=0
        for i in range(len(labels)):
            b=Button(self,text=labels[i],anchor=W,height=1,pady=0)
            # i=i binds the column index at definition time, avoiding the
            # late-binding-closure pitfall inside this loop.
            b.config(command=lambda s=self,i=i:s.setSort(i))
            b.grid(column=i,row=0,sticky=N+E+W)
            box=apply(Listbox,(self,),kw)
            box.grid(column=i,row=1,sticky=N+E+S+W)
            self.lists.append(box)
        grid_setexpand(self)
        # Header row keeps its natural height; only the listboxes expand.
        self.rowconfigure(0,weight=0)
        self._callall("bind",'<Button-1>',self.Button1)
        self._callall("bind",'<B1-Motion>',self.Button1)
        self.bind('<Up>',self.UpKey)
        self.bind('<Down>',self.DownKey)
        # sort is None (unsorted) or [column_index, direction(+1/-1)].
        self.sort=None
    def _callall(self,funcname,*args,**kw):
        # Invoke the named Listbox method on every column; return the list
        # of non-None results, or None when nothing was returned.
        rets=[]
        for l in self.lists:
            func=getattr(l,funcname)
            ret=apply(func,args,kw)
            if ret!=None: rets.append(ret)
        if rets: return rets
    def Button1(self,e):
        # Click or drag: select the row under the pointer in every column.
        index=self.nearest(e.y)
        self.select_clear(0,END)
        self.select_set(index)
        self.activate(index)
        # "break" suppresses the default Listbox click handling.
        return "break"
    def UpKey(self,e):
        # Move the selection one row up (no wrap at the top).
        index=self.index(ACTIVE)
        if index:
            self.select_clear(0,END)
            self.select_set(index-1)
        return "break"
    def DownKey(self,e):
        # Move the selection one row down (no wrap at the bottom).
        index=self.index(ACTIVE)
        if index!=self.size()-1:
            self.select_clear(0,END)
            self.select_set(index+1)
        return "break"
    def setSort(self,index):
        # Header click: sort by that column, toggling the direction when
        # the same column is clicked twice in a row.
        if self.sort==None:
            self.sort=[index,1]
        elif self.sort[0]==index:
            self.sort[1]=-self.sort[1]
        else:
            self.sort=[index,1]
        self._sort()
    def _sort(self):
        # Re-sort all rows according to self.sort, unless sorting is
        # disabled or no sort column was chosen yet.
        if self.disablesorting:
            return
        if self.sort==None:
            return
        ind,direc=self.sort
        li=list(self.get(0,END))
        li.sort(lambda x,y,i=ind,d=direc:d*cmp(x[i],y[i]))
        self.delete(0,END)
        for l in li:
            self._insert(END,l)
    def activate(self,index):
        self._callall("activate",index)
#    def bbox(self,index):
#        return self._callall("bbox",index)
    def curselection(self):
        # All columns share one selection, so asking the first suffices.
        return self.lists[0].curselection()
    def delete(self,*args):
        apply(self._callall,("delete",)+args)
    def get(self,*args):
        # A single-index get returns one row as-is; a range get transposes
        # the per-column results back into a list of rows.
        bad=apply(self._callall,("get",)+args)
        if len(args)==1:
            return bad
        ret=[]
        for i in range(len(bad[0])):
            r=[]
            for j in range(len(bad)):
                r.append(bad[j][i])
            ret.append(r)
        return ret
    def index(self,index):
        return self.lists[0].index(index)
    def insert(self,index,items):
        # Insert one row (one value per column), then re-apply the sort.
        self._insert(index,items)
        self._sort()
    def _insert(self,index,items):
        for i in range(len(items)):
            self.lists[i].insert(index,items[i])
    def nearest(self,y):
        return self.lists[0].nearest(y)
    def see(self,index):
        self._callall("see",index)
    def size(self):
        return self.lists[0].size()
    def selection_anchor(self,index):
        self._callall("selection_anchor",index)
    select_anchor=selection_anchor
    def selection_clear(self,*args):
        apply(self._callall,("selection_clear",)+args)
    select_clear=selection_clear
    def selection_includes(self,index):
        return self.lists[0].select_includes(index)
    select_includes=selection_includes
    def selection_set(self,*args):
        apply(self._callall,("selection_set",)+args)
    select_set=selection_set
    def xview(self,*args):
        # With no args, report the first column's view; otherwise scroll
        # every column together.
        if not args: return self.lists[0].xview()
        apply(self._callall,("xview",)+args)
    def yview(self,*args):
        if not args: return self.lists[0].yview()
        apply(self._callall,("yview",)+args)
class ProgressBar:
    """A canvas-based progress bar with an optional percentage label.

    Wraps a Frame containing a Canvas; the bar is a filled rectangle that
    is resized by update().  Construct it, pack/grid self.frame, then
    call updateProgress() as work proceeds.
    """
    def __init__(self, master=None, orientation="horizontal",
                 min=0, max=100, width=100, height=18,
                 doLabel=1, appearance="sunken",
                 fillColor="blue", background="gray",
                 labelColor="yellow", labelFont="Verdana",
                 labelText="", labelFormat="%d%%",
                 value=0, bd=2):
        # preserve various values
        self.master=master
        self.orientation=orientation
        self.min=min
        self.max=max
        self.width=width
        self.height=height
        self.doLabel=doLabel
        self.fillColor=fillColor
        self.labelFont= labelFont
        self.labelColor=labelColor
        self.background=background
        self.labelText=labelText
        self.labelFormat=labelFormat
        self.value=value
        # The bar is a rectangle item on a canvas; the label a text item
        # centered on it.  Both are reconfigured in update().
        self.frame=Frame(master, relief=appearance, bd=bd)
        self.canvas=Canvas(self.frame, height=height, width=width, bd=0,
                           highlightthickness=0, background=background)
        self.scale=self.canvas.create_rectangle(0, 0, width, height,
                                                fill=fillColor)
        self.label=self.canvas.create_text(self.canvas.winfo_reqwidth() / 2,
                                           height / 2, text=labelText,
                                           anchor="c", fill=labelColor,
                                           font=self.labelFont)
        self.update()
        self.canvas.pack(side='top', fill='x', expand='no')
    def updateProgress(self, newValue, newMax=None):
        # Set a new value (and optionally a new maximum), then redraw.
        if newMax:
            self.max = newMax
        self.value = newValue
        self.update()
    def update(self):
        """Redraw the bar and label to reflect the current value."""
        # Trim the values to be between min and max
        value=self.value
        if value > self.max:
            value = self.max
        if value < self.min:
            value = self.min
        # Adjust the rectangle
        if self.orientation == "horizontal":
            self.canvas.coords(self.scale, 0, 0,
              float(value) / self.max * self.width, self.height)
        else:
            # Vertical bars fill upwards from the bottom edge.
            self.canvas.coords(self.scale, 0,
                               self.height - (float(value) /
                                              self.max*self.height),
                               self.width, self.height)
        # Now update the colors
        self.canvas.itemconfig(self.scale, fill=self.fillColor)
        self.canvas.itemconfig(self.label, fill=self.labelColor)
        # And update the label
        if self.doLabel:
            if value:
                if value >= 0:
                    pvalue = int((float(value) / float(self.max)) *
                                   100.0)
                else:
                    pvalue = 0
                self.canvas.itemconfig(self.label, text=self.labelFormat
                                         % pvalue)
            else:
                # Zero/empty value: hide the label entirely.
                self.canvas.itemconfig(self.label, text='')
        else:
            self.canvas.itemconfig(self.label, text=self.labelFormat %
                                   self.labelText)
        self.canvas.update_idletasks()
class DirectoryBrowser(_Dialog):
    """Native Tk directory chooser: a tkFileDialog._Dialog that invokes
    the tk_chooseDirectory command instead of a file-selection dialog."""
    command = "tk_chooseDirectory"
def askdirectory(**options):
    """Ask for a directory to save to.

    @param **options: passed through to L{DirectoryBrowser}
    @returns: the chosen directory path (or empty on cancel)
    """
    # Direct keyword expansion replaces the deprecated apply() builtin.
    return DirectoryBrowser(**options).show()
class GenericLogin(Toplevel):
    """A generic login window built from (label, default[, options]) field
    descriptions; hands a dict of the entered values to a callback."""
    def __init__(self,callback,buttons):
        # callback: called with {lowercased label: entered value} on login.
        # buttons: sequence of (label, default) or (label, default, opts)
        # where opts is a dict of extra Entry keyword options.
        Toplevel.__init__(self)
        self.callback=callback
        Label(self,text="Twisted v%s"%copyright.version).grid(column=0,row=0,columnspan=2)
        self.entries={}
        row=1
        for stuff in buttons:
            label,value=stuff[:2]
            # An optional third element carries extra Entry options
            # (e.g. show='*' for password fields).
            if len(stuff)==3:
                dict=stuff[2]
            else: dict={}
            Label(self,text=label+": ").grid(column=0,row=row)
            e=apply(Entry,(self,),dict)
            e.grid(column=1,row=row)
            e.insert(0,value)
            self.entries[label]=e
            row=row+1
        Button(self,text="Login",command=self.doLogin).grid(column=0,row=row)
        Button(self,text="Cancel",command=self.close).grid(column=1,row=row)
        self.protocol('WM_DELETE_WINDOW',self.close)
    def close(self):
        # Stop the Tk mainloop as well as destroying this window.
        self.tk.quit()
        self.destroy()
    def doLogin(self):
        # Collect the field values (keys lower-cased) and hand them to the
        # caller-supplied callback, then dismiss the window.
        values={}
        for k in self.entries.keys():
            values[string.lower(k)]=self.entries[k].get()
        self.callback(values)
        self.destroy()
class Login(Toplevel):
    """A PB login window with username/password/host/service/port fields.

    Clicking "Log In" starts a pb.connect(); on success the caller's
    callback receives the resulting reference, on failure the error is
    shown in the window's status label.
    """
    def __init__(self,
                 callback,
                 referenced = None,
                 initialUser = "guest",
                 initialPassword = "guest",
                 initialHostname = "localhost",
                 initialService = "",
                 initialPortno = pb.portno):
        Toplevel.__init__(self)
        version_label = Label(self,text="Twisted v%s" % copyright.version)
        # referenced (if any) is offered to the server as the client-side
        # Referenceable; callback receives the root object on success.
        self.pbReferenceable = referenced
        self.pbCallback = callback
        # version_label.show()
        self.username = Entry(self)
        self.password = Entry(self,show='*')
        self.hostname = Entry(self)
        self.service = Entry(self)
        self.port = Entry(self)
        self.username.insert(0,initialUser)
        self.password.insert(0,initialPassword)
        self.service.insert(0,initialService)
        self.hostname.insert(0,initialHostname)
        self.port.insert(0,str(initialPortno))
        userlbl=Label(self,text="Username:")
        passlbl=Label(self,text="Password:")
        servicelbl=Label(self,text="Service:")
        hostlbl=Label(self,text="Hostname:")
        portlbl=Label(self,text="Port #:")
        # logvar backs the status label so it can be updated by name.
        self.logvar=StringVar()
        self.logvar.set("Protocol PB-%s"%pb.Broker.version)
        self.logstat = Label(self,textvariable=self.logvar)
        self.okbutton = Button(self,text="Log In", command=self.login)
        version_label.grid(column=0,row=0,columnspan=2)
        # Lay out label/entry pairs in successive rows.
        z=0
        for i in [[userlbl,self.username],
                  [passlbl,self.password],
                  [hostlbl,self.hostname],
                  [servicelbl,self.service],
                  [portlbl,self.port]]:
            i[0].grid(column=0,row=z+1)
            i[1].grid(column=1,row=z+1)
            z = z+1
        self.logstat.grid(column=0,row=6,columnspan=2)
        self.okbutton.grid(column=0,row=7,columnspan=2)
        self.protocol('WM_DELETE_WINDOW',self.tk.quit)
    def loginReset(self):
        """Clear the status label back to 'Idle.'."""
        self.logvar.set("Idle.")
    def loginReport(self, txt):
        """Show txt in the status label, auto-clearing after 30 seconds."""
        self.logvar.set(txt)
        self.after(30000, self.loginReset)
    def login(self):
        """Attempt a PB connection using the values currently in the form."""
        host = self.hostname.get()
        port = self.port.get()
        service = self.service.get()
        try:
            port = int(port)
        except ValueError:
            # Entry.get() always returns a string, so a non-numeric port
            # is the only expected failure here; the previous bare except
            # also swallowed unrelated errors.  Leave the raw string for
            # pb.connect to reject.
            pass
        user = self.username.get()
        pswd = self.password.get()
        pb.connect(host, port, user, pswd, service,
                   client=self.pbReferenceable).addCallback(self.pbCallback).addErrback(
            self.couldNotConnect)
    def couldNotConnect(self,f):
        """Errback: surface the connection failure in the status label."""
        self.loginReport("could not connect:"+f.getErrorMessage())
if __name__=="__main__":
    # Demo: show a four-column CList and fill it with a few numeric rows.
    root = Tk()
    demo = CList(root, ["Username", "Online", "Auto-Logon", "Gateway"])
    demo.pack()
    for base in range(0, 16, 4):
        demo.insert(END, [base, base + 1, base + 2, base + 3])
    mainloop()
| gpl-3.0 |
coupdair/pyoptools | pyoptools/gui/glwindow.py | 9 | 19554 | #!/usr/bin/env python
# This includes the two classes wxGLWindow and wxAdvancedGLWindow
# from OpenGL.TK in the PyOpenGL distribution
# ported to wxPython by greg Landrum
# modified by Y. Wong
# modified by R. Amezquita
# modified by O. Olarte
from OpenGL.GL import *
from OpenGL.GLU import *
from wx import *
from wx.glcanvas import *
import math
import os,sys
def test_data(npoints):
    """Generate npoints random 3-D points plus a Delaunay triangulation
    of their (x, y) projection.

    Returns (points, tri): points is a list of (x, y, z) tuples drawn
    from a standard normal distribution, tri the triangle index array
    from matplotlib's delaunay module.

    NOTE(review): matplotlib.delaunay has been removed from modern
    matplotlib releases -- confirm the targeted version, or port to
    scipy.spatial.Delaunay.
    """
    #A simple testing function that generate random triangles
    # 10 random points (x,y) in the plane
    import numpy
    import matplotlib.delaunay as triang
    x,y = numpy.array(numpy.random.standard_normal((2,npoints)))
    z = numpy.array(numpy.random.standard_normal(npoints))
    points=[]
    for i in range(npoints):
        points.append((x[i],y[i],z[i]))
    cens,edg,tri,neig = triang.delaunay(x,y)
    return points, tri
def glTranslateScene(s, x, y, mousex, mousey):
    """Pre-multiply the modelview matrix with a translation driven by a
    mouse drag.

    (x, y) is the current mouse position and (mousex, mousey) the
    previous one; s scales pixels to world units.  The Y term is flipped
    because window Y grows downwards while GL's Y grows upwards.
    """
    glMatrixMode(GL_MODELVIEW)
    # Save the current matrix, then rebuild it as translation * old so
    # the translation is applied in eye space.
    mat = glGetDoublev(GL_MODELVIEW_MATRIX)
    glLoadIdentity()
    glTranslatef(s * (x - mousex), s * (mousey - y), 0.0)
    glMultMatrixd(mat)
def glRotateScene(s, xcenter, ycenter, zcenter, x, y, mousex, mousey):
    """Pre-multiply the modelview matrix with a rotation about the point
    (xcenter, ycenter, zcenter), driven by a mouse drag.

    Vertical motion rotates about the X axis, horizontal motion about
    the Y axis; s scales pixels to degrees.
    """
    glMatrixMode(GL_MODELVIEW)
    # Save the current matrix, then rebuild it as
    # translate(center) * rotate * translate(-center) * old, so the
    # rotation pivots around the given center point in eye space.
    mat = glGetDoublev(GL_MODELVIEW_MATRIX)
    glLoadIdentity()
    glTranslatef(xcenter, ycenter, zcenter)
    glRotatef(s * (y - mousey), 1., 0., 0.)
    glRotatef(s * (x - mousex), 0., 1., 0.)
    glTranslatef(-xcenter, -ycenter, -zcenter)
    glMultMatrixd(mat)
def v3distsq(a, b):
    """Return the squared Euclidean distance between 3-vectors a and b."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    dz = a[2] - b[2]
    return dx * dx + dy * dy + dz * dz
# This code is needed to avoid faults on sys.exit()
# Chain any previously installed exit handler behind our own so both run.
# NOTE(review): sys.exitfunc only exists on Python 2 (removed in Python 3,
# where atexit is the replacement); the hasattr guard makes this a no-op
# setup on interpreters without it.
import sys
oldexitfunc = None
if hasattr(sys, 'exitfunc'):
    oldexitfunc = sys.exitfunc
def cleanup():
    # Delegate to whatever exit handler was installed before us, if any.
    if oldexitfunc: oldexitfunc()
sys.exitfunc = cleanup
class wxGLWindow(GLCanvas):
    """Implements a simple wxPython OpenGL window.

    This class provides a simple window, into which GL commands can be
    issued.  This is done by overriding the built in functions InitGL(),
    DrawGL(), and FinishGL().  The main difference between it and the
    plain wxGLCanvas is that it copes with refreshing and resizing the
    window.
    """
    def __init__(self, parent,*args,**kw):
        # Defer all GL state setup until the first paint, when a GL
        # context is guaranteed to exist.
        self.GL_uninitialised = 1
        apply(GLCanvas.__init__,(self, parent)+args, kw)
        EVT_SIZE(self,self.wxSize)
        EVT_PAINT(self,self.wxPaint)
        EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
        # Cached client size, kept up to date by wxSize().
        self.w, self.h = self.GetClientSizeTuple()
    def __del__(self):
#        self.SetCurrent()
        self.FinishGL()
    def InitGL(self):
        """OpenGL initialisation routine (to be overridden).

        This routine, containing purely OpenGL commands, should be
        overridden by the user to set up the GL scene.  If it is not
        overridden, it defaults to setting an ambient light, setting the
        background colour to gray, and enabling GL_DEPTH_TEST and
        GL_COLOR_MATERIAL.
        """
        #set up lighting
        glLightfv(GL_LIGHT0, GL_AMBIENT, [1.0, 1.0, 1.0, 1.0])
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glClearColor(0.7,0.7,0.7,0.0)
        glShadeModel(GL_SMOOTH)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_COLOR_MATERIAL)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
    def FinishGL(self):
        """OpenGL closing routine (to be overridden).

        This routine should be overridden if necessary by any OpenGL
        commands need to be specified when deleting the GLWindow (e.g.
        deleting Display Lists).
        """
        pass
    def DrawGL(self):
        """OpenGL drawing routine (to be overridden).

        This routine, containing purely OpenGL commands, should be
        overridden by the user to draw the GL scene.  If it is not
        overridden, it defaults to drawing a colour cube.
        """
        #Draw colour cube
        # First a quad strip covering six faces, then two quads to close
        # the top and bottom; one colour/normal per corner.
        glBegin(GL_QUAD_STRIP)
        glColor3f(1.0,1.0,1.0) #corner 1
        glNormal3f(0.57735027, 0.57735027, 0.57735027)
        glVertex3f(0.5, 0.5, 0.5)
        glColor3f(1.0,0.0,1.0) #corner 2
        glNormal3f(0.57735027, -0.57735027, 0.57735027)
        glVertex3f(0.5, -0.5, 0.5)
        glColor3f(1.0,1.0,0.0) #corner 3
        glNormal3f(0.57735027, 0.57735027, -0.57735027)
        glVertex3f(0.5, 0.5, -0.5)
        glColor3f(1.0,0.0,0.0) #corner 4
        glNormal3f(0.57735027, -0.57735027, -0.57735027)
        glVertex3f(0.5, -0.5, -0.5)
        glColor3f(0.0,1.0,0.0) #corner 5
        glNormal3f(-0.57735027, 0.57735027, -0.57735027)
        glVertex3f(-0.5, 0.5, -0.5)
        glColor3f(0.0,0.0,0.0) #corner 6
        glNormal3f(-0.57735027, -0.57735027, -0.57735027)
        glVertex3f(-0.5, -0.5, -0.5)
        glColor3f(0.0,1.0,1.0) #corner 7
        glNormal3f(-0.57735027, 0.57735027, 0.57735027)
        glVertex3f(-0.5, 0.5, 0.5)
        glColor3f(0.0,0.0,1.0) #corner 8
        glNormal3f(-0.57735027, -0.57735027, 0.57735027)
        glVertex3f(-0.5, -0.5, 0.5)
        glColor3f(1.0,1.0,1.0) #corner 1
        glNormal3f(0.57735027, 0.57735027, 0.57735027)
        glVertex3f(0.5, 0.5, 0.5)
        glColor3f(1.0,0.0,1.0) #corner 2
        glNormal3f(0.57735027, -0.57735027, 0.57735027)
        glVertex3f(0.5, -0.5, 0.5)
        glEnd()
        glBegin(GL_QUADS)
        glColor3f(1.0,1.0,1.0) #corner 1
        glNormal3f(0.57735027, 0.57735027, 0.57735027)
        glVertex3f(0.5, 0.5, 0.5)
        glColor3f(1.0,1.0,0.0) #corner 3
        glNormal3f(0.57735027, 0.57735027, -0.57735027)
        glVertex3f(0.5, 0.5, -0.5)
        glColor3f(0.0,1.0,0.0) #corner 5
        glNormal3f(-0.57735027, 0.57735027, -0.57735027)
        glVertex3f(-0.5, 0.5, -0.5)
        glColor3f(0.0,1.0,1.0) #corner 7
        glNormal3f(-0.57735027, 0.57735027, 0.57735027)
        glVertex3f(-0.5, 0.5, 0.5)
        glColor3f(1.0,0.0,1.0) #corner 2
        glNormal3f(0.57735027, -0.57735027, 0.57735027)
        glVertex3f(0.5, -0.5, 0.5)
        glColor3f(1.0,0.0,0.0) #corner 4
        glNormal3f(0.57735027, -0.57735027, -0.57735027)
        glVertex3f(0.5, -0.5, -0.5)
        glColor3f(0.0,0.0,0.0) #corner 6
        glNormal3f(-0.57735027, -0.57735027, -0.57735027)
        glVertex3f(-0.5, -0.5, -0.5)
        glColor3f(0.0,0.0,1.0) #corner 8
        glNormal3f(-0.57735027, -0.57735027, 0.57735027)
        glVertex3f(-0.5, -0.5, 0.5)
        glEnd()
    def wxSize(self, event = None):
        """Called when the window is resized"""
        self.w,self.h = self.GetClientSizeTuple()
        # Only touch GL state once a context actually exists.
        if self.GetContext():
            self.SetCurrent()
            glViewport(0, 0, self.w, self.h)
        self.Update()
    def wxEraseBackground(self, event):
        """Routine does nothing, but prevents flashing"""
        pass
    def wxPaint(self, event=None):
        """Called on a paint event.

        This sets the painting drawing context, then calls the base
        routine wxRedrawGL().
        """
        # The PaintDC must exist for the duration of the paint handler,
        # even though it is not used directly.
        dc = PaintDC(self)
        self.wxRedrawGL(event)
    def wxRedraw(self, event=None):
        """Called on a redraw request.

        This sets the drawing context, then calls the base routine
        wxRedrawGL().  It can be called by the user when a refresh is
        needed.
        """
        dc = ClientDC(self)
        self.wxRedrawGL(event)
    def wxRedrawGL(self, event=None):
        """This is the routine called when drawing actually takes place.

        It needs to be separate so that it can be called by both paint
        events and by other events.  It should not be called directly.
        """
        self.SetCurrent()
        # First draw: run the user's InitGL with a valid context.
        if self.GL_uninitialised:
            glViewport(0, 0, self.w, self.h)
            self.InitGL()
            self.GL_uninitialised=0
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glLoadIdentity()
        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        self.DrawGL()               # Actually draw here
        glPopMatrix();
        glFlush()                   # Flush
        self.SwapBuffers()          # Swap buffers
        if event: event.Skip()      # Pass event up
        self.Update()
class wxAdvancedGLWindow(wxGLWindow):
    """Implements a wxPython OpenGL window allowing spinning, zooming, etc.

    This class is derived from wxGLWindow, and can be used in exactly the
    same way, by overriding the functions InitGL(), FinishGL(), and DrawGL()
    with functions containing OpenGL commands.  The window captures mouse
    events, and keypresses.  You might want to override some of these
    functions if you need more sophisticated control"""
    def __init__(self, parent,*args,**kw):
        # 'autospin_allowed' is our own keyword; strip it before handing
        # the remaining kwargs to the base wx canvas.
        if kw.has_key('autospin_allowed'):
            # Is the widget allowed to autospin?
            self.autospin_allowed = kw['autospin_allowed']
            del kw['autospin_allowed']
        else:
            self.autospin_allowed = 0
        apply(wxGLWindow.__init__,(self, parent)+args, kw)
        # The _back color
        self.r_back = 0.
        self.g_back = 0.
        self.b_back = 0.
        # Where the eye is
        #self.base_distance = self.distance = 10.0
        self.base_distance = self.distance = 1000.0
        # Field of view in y direction
        self.fovy = 30.0
        # Position of clipping planes.
        self.near = 0.1
        self.far = 10000.0
        # Where we are centering.
        self.xcenter = 0.0
        self.ycenter = 0.0
        self.zcenter = 0.0
        self.parent = parent
        # Current coordinates of the mouse.
        self.xmouse = 0
        self.ymouse = 0
        # Per-idle-tick spin increments while autospinning.
        self.xspin = 0
        self.yspin = 0
        # Is the widget currently autospinning?
        self.autospin = 0
        # Position of the last left-button press (drag origin).
        self.initLeft = (0,0)
        EVT_SIZE(self,self.wxSize)
        EVT_PAINT(self,self.wxPaint)
        EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
        EVT_CHAR(self,self.OnChar)
        EVT_LEFT_DOWN(self,self.OnLeftClick)
        EVT_LEFT_DCLICK(self,self.OnLeftDClick)
        EVT_LEFT_UP(self,self.OnLeftUp)
        EVT_MIDDLE_DOWN(self,self.OnMiddleClick)
        EVT_RIGHT_DOWN(self,self.OnRightClick)
        EVT_RIGHT_DCLICK(self,self.OnRightDClick)
        EVT_MOTION(self,self.wxMouseMotion)
        EVT_IDLE(self,self.wxIdle)
    def wxIdle(self,event):
        # Drive the autospin from idle events; WakeUpIdle requests the
        # next idle event so the spin continues without pegging the CPU.
        if self.autospin:
#            self.do_AutoSpin(event) #doing it this way hogs the cpu
#            event.RequestMore() #doing it this way hogs the cpu
            WakeUpIdle()
            self.do_AutoSpin(event)
            event.Skip(1)
    def OnChar(self,event):
        # 'a' toggles whether autospin is allowed (stopping any current
        # spin); 'q' closes the parent window.
        key = event.GetKeyCode()
        if key == ord('a'):
            self.autospin_allowed = not self.autospin_allowed
            if self.autospin:
                self.autospin = 0
        elif key == ord('q'):
            self.parent.Destroy()
    def OnLeftClick(self,event):
        # Remember the press point as the drag origin for autospin.
        self.wxRecordMouse(event)
        self.initLeft = event.GetX(),event.GetY()
    def OnLeftDClick(self,event):
        # Double-click: reset the model rotation.
        self.wxRecordMouse(event)
        self.reset()
    def OnLeftUp(self,event):
        # Releasing the left button (without shift) may start an autospin.
        if not event.m_shiftDown:
            self.wxAutoSpin(event)
    def OnMiddleClick(self,event):
        self.wxRecordMouse(event)
    def OnRightClick(self,event):
        self.wxRecordMouse(event)
    def OnRightDClick(self,event):
        # Right double-click: restore the default viewing distance.
        self.wxRecordMouse(event)
        self.distance=self.base_distance
        self.wxRedraw()
    def OnLeftDrag(self,event):
        self.wxRotate(event)
    def OnMiddleDrag(self,event):
        self.wxTranslate(event)
    def OnRightDrag(self,event):
        self.wxScale(event)
    def wxMouseMotion(self,event):
        # Dispatch drags: left rotates, middle translates, right scales.
        if not event.Dragging():
            return
        if event.LeftIsDown():
            self.OnLeftDrag(event)
        elif event.MiddleIsDown():
            self.OnMiddleDrag(event)
        elif event.RightIsDown():
            self.OnRightDrag(event)
    def report_opengl_errors(message = "OpenGL error:"):
        """Report any opengl errors that occured while drawing.

        NOTE(review): this def lacks a ``self`` parameter -- called as a
        method, the instance would be bound to ``message``.  It appears
        unused in this file; confirm before relying on it.
        """
        while 1:
            err_value = glGetError()
            if not err_value: break
            print message, gluErrorString(err_value)
    def SetBgColour(self, r, g, b):
        """Change the background colour of the widget.

        There seems to be a problem with this:"""
        self.r_back = r
        self.g_back = g
        self.b_back = b
        self.wxRedraw()
    def SetCenterpoint(self, x, y, z):
        """Set the new center point for the model.
        This is where we are looking."""
        self.xcenter = x
        self.ycenter = y
        self.zcenter = z
        self.wxRedraw()
    def set_base_distance(self, distance):
        """Set how far the eye is from the position we are looking.
        Sets the base distance, to which we are returned if we double click"""
        self.base_distance = distance
    def set_distance(self, distance):
        """Set how far the eye is from the position we are looking."""
        self.distance = distance
        self.wxRedraw()
    def reset(self):
        """Reset rotation matrix for this widget."""
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity()
        self.wxRedraw()
#    def wxHandlePick(self, event):
#        """Handle a pick on the scene."""
#        pass
    def wxRecordMouse(self, event):
        """Record the current mouse position."""
        self.xmouse = event.GetX()
        self.ymouse = event.GetY()
    def wxStartRotate(self, event):
        # Switch off any autospinning if it was happening
        self.autospin = 0
        self.wxRecordMouse(event)
    def wxScale(self, event):
        """Scale the scene.  Achieved by moving the eye position.

        Dragging down grows the distance (zoom out); up shrinks it."""
        scale = 1 - 0.01 * (event.GetY() - self.ymouse)
        self.distance = self.distance * scale
        self.wxRedraw()
        self.wxRecordMouse(event)
    def do_AutoSpin(self,event):
        # Apply one increment of the current spin about the center point.
        s = 0.5
        glRotateScene(0.5,
                      self.xcenter, self.ycenter, self.zcenter,
                      self.yspin, self.xspin, 0, 0)
        self.wxRedraw()
    def wxAutoSpin(self, event):
        """Perform autospin of scene."""
        # Spin velocity comes from the total drag vector since the left
        # press; a zero vector means no spin at all.
        if self.autospin_allowed:
            self.autospin = 1
            self.yspin = .1 * (event.GetX()-self.initLeft[0])
            self.xspin = .1 * (event.GetY()-self.initLeft[1])
            if self.xspin == 0 and self.yspin == 0:
                self.autospin = 0
            else:
                self.do_AutoSpin(event)
    def wxRotate(self, event):
        """Perform rotation of scene.

        Plain drag rotates about the X/Y axes; shift-drag rotates about
        the Z axis, with the sign chosen by which side of the window
        center the pointer is on so the motion feels circular.
        """
        if not event.m_shiftDown:
            glRotateScene(0.5,
                          self.xcenter, self.ycenter, self.zcenter,
                          event.GetX(), event.GetY(), self.xmouse, self.ymouse)
        else:
            # rotate about z
            sz = self.GetClientSizeTuple()
            sz = (sz[0]/2, sz[1]/2)
            xp = event.GetX()
            yp = event.GetY()
            dy = (self.ymouse-yp)
            dx = (self.xmouse-xp)
            if yp > sz[1]:
                dx = dx * -1
            if xp < sz[0]:
                dy = dy * -1
            d = dx + dy
            glMatrixMode(GL_MODELVIEW);
            m = glGetDouble(GL_MODELVIEW_MATRIX)
            glLoadIdentity()
            glTranslatef(self.xcenter,self.ycenter,self.zcenter)
            glRotatef(.5*d,0,0,1.)
            glTranslatef(-self.xcenter,-self.ycenter,-self.zcenter)
            #glMultMatrixd(ravel(m)) #from Numeric...
            glMultMatrixd(m)
        self.wxRedraw()
        self.wxRecordMouse(event)
    def wxTranslate(self, event):
        """Perform translation of scene."""
        # Scale mouse translations to object viewplane so object tracks with mouse
        win_height = max( 1,self.w)
        obj_c = (self.xcenter, self.ycenter, self.zcenter)
        win = gluProject( obj_c[0], obj_c[1], obj_c[2] )
        obj = gluUnProject( win[0], win[1] + 0.5 * win_height, win[2] )
        dist = math.sqrt( v3distsq( obj, obj_c ) )
        scale = abs( dist / ( 0.5 * win_height ) )
        glTranslateScene(scale, event.GetX(), event.GetY(), self.xmouse, self.ymouse)
        self.wxRedraw()
        self.wxRecordMouse(event)
    def wxRedrawGL(self, event=None):
        """Method used to actually draw the scene.

        This is more complex than in the wxGLWindow class from which this
        class is derived, as we need to do rotations, translations, etc."""
        self.SetCurrent()
        if self.GL_uninitialised:
            glViewport(0, 0, self.w, self.h)
            self.InitGL()
            self.GL_uninitialised = 0
        # Clear the background and depth buffer.
        glClearColor(self.r_back, self.g_back, self.b_back, 0.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_PROJECTION);
        glLoadIdentity()
        gluPerspective(self.fovy, float(self.w)/float(self.h), self.near, self.far)
        # The eye sits on the +Z axis relative to the center, looking at it.
        gluLookAt(self.xcenter, self.ycenter, self.zcenter + self.distance,
            self.xcenter, self.ycenter, self.zcenter,
            0., 1., 0.)
        glMatrixMode(GL_MODELVIEW);
        glPushMatrix();
        self.DrawGL()               # Actually draw here
        glPopMatrix();
        glFlush()                   # Tidy up
        self.SwapBuffers()
        if event: event.Skip()
#-----------------------------------------------------
if __name__ == '__main__':
    # Self-test: open a frame holding one of the demo GL canvases below.
    from OpenGL.GLUT import *
    import array #for creating the texture map
    class MyApp(App):
        def OnInit(self):
            frame = Frame(None, -1, "wxPython OpenGL example", DefaultPosition, Size(400,400))
            #win1 = wxGLWindow(frame, -1, Point(5,5), Size(190,190))
            #win2 = wxAdvancedGLWindow(frame, -1, Point(205,5), Size(190,190), autospin_allowed = 0)
            win3 = MyWin1_1(frame, -1, Point(5,205), Size(190,190), autospin_allowed = 0)
            #win4 = MyWin2(frame, -1, Point(205,205), Size(190,190))
#            win1.SetScrollbars(0,0,0,0)
#            win4.SetScrollbars(0,0,0,0)
#            win3.SetBgColour(0.0,0.0,1.0)
            frame.Show(True)
            self.SetTopWindow(frame)
            return True
    class MyWin1(wxAdvancedGLWindow):
        """basic example of a wxAdvancedGLWindow"""
        def DrawGL(self):
            # Two solid cones joined base-to-base (red front, green back).
            glColor3f(1.0,0.3,0.3)
            glutSolidCone(1.0,2,20,16)
            glRotatef(180.0,0.0,1.0,0.0)
            glColor3f(0.3,1.0,0.3)
            glutSolidCone(1.0,1,20,16)
            glLoadIdentity()
    class MyWin1_1(wxAdvancedGLWindow):
        """basic example of a wxAdvancedGLWindow with basic triangles"""
        def InitGL(self):
            # Build a random triangulated surface once, up front.
            self.points,self.polylist=test_data(1000)
            print self.points, self.polylist
        def DrawGL(self):
            # Draw each triangle of the precomputed triangulation.
            glColor4f(.1,.7,.7, 0.5)
            for p in self.polylist:
                if len(p)==3:
                    p0=self.points[p[0]]
                    p1=self.points[p[1]]
                    p2=self.points[p[2]]
                    glBegin(GL_TRIANGLES) #Drawing Using Triangles
                    glVertex3f( p0[0], p0[1], p0[2])
                    glVertex3f( p1[0], p1[1], p1[2])
                    glVertex3f( p2[0], p2[1], p2[2])
                    glEnd()
    class MyWin2(wxAdvancedGLWindow):
        """example using display lists"""
        def InitGL(self):
            # Build a striped 1-D texture, configure eye-linear texture
            # generation, lighting and culling; the teapot display list is
            # compiled lazily on first draw (see DrawGL).
            self.uninitialised = 1
            glClearColor (0.0, 0.0, 0.0, 0.0);
            glEnable(GL_DEPTH_TEST);
            glShadeModel(GL_SMOOTH);
            self.stripeImageWidth=32
            # 5 red texels followed by green ones, RGBA bytes.
            temp = array.array('B')
            for x in range(5):
                temp.fromlist([255,0,0,255])
            for x in range(self.stripeImageWidth-5):
                temp.fromlist([0,255,0,255])
            self.stripeImage = temp.tostring()
            glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            self.texName=glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self.texName)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.stripeImageWidth,1,0,
                         GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
            glTexImage2D(GL_TEXTURE_2D, 0, 4, self.stripeImageWidth, 1, 0,
                         GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
            glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
            glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR)
            glTexGenfv(GL_S, GL_EYE_PLANE, [1.0, 1.0, 1.0, 0.0])
            glEnable(GL_TEXTURE_GEN_S);
            glEnable(GL_TEXTURE_2D);
            glEnable(GL_CULL_FACE);
            glEnable(GL_LIGHTING);
            glEnable(GL_LIGHT0);
            glEnable(GL_AUTO_NORMAL);
            glEnable(GL_NORMALIZE);
            glFrontFace(GL_CW);
            glCullFace(GL_BACK);
            glMaterialf (GL_FRONT, GL_SHININESS, 64.0);
            self.DispList=glGenLists(1)
        def DrawGL(self):
            # Compile the teapot into the display list on first use, then
            # just replay the list on subsequent draws.
            if self.uninitialised:
                glNewList(self.DispList, GL_COMPILE)
                glRotatef(45.0, 0.0, 0.0, 1.0);
                glBindTexture(GL_TEXTURE_2D, self.texName);
                glutSolidTeapot(2.0);
                glEndList()
                self.uninitialised = 0
            glCallList(self.DispList)
        def FinishGL(self):
            # Free the display list when the window goes away.
            # NOTE(review): glDeleteLists normally takes (list, range);
            # confirm the single-argument call works with this PyOpenGL.
            if self.DispList:
                glDeleteLists(self.DispList)
    app = MyApp(0)
    app.MainLoop()
| bsd-3-clause |
duyetdev/openerp-6.1.1 | openerp/addons/hr_timesheet_invoice/wizard/hr_timesheet_invoice_create.py | 3 | 11299 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import osv, fields
from tools.translate import _
## Create an invoice based on selected timesheet lines
#
class account_analytic_line(osv.osv):
    _inherit = "account.analytic.line"

    def invoice_cost_create(self, cr, uid, ids, data=None, context=None):
        """Create draft customer invoices from the analytic lines ``ids``.

        One invoice is created per analytic account; within an invoice,
        one invoice line is created per (product, invoicing factor, unit
        of measure) group of analytic lines.

        :param data: wizard options dict:
            'date'/'time'/'name' (booleans -- include the work date, the
            time spent and/or the description in each invoice line note)
            and 'product' (optional many2one value forcing one product on
            every invoice line).
        :return: list of ids of the created invoices
        :raise osv.except_osv: when an analytic account has no partner or
            pricelist, a partner has no address, a line has no product,
            or a product has no income account configured.
        """
        # ``data`` used to default to a mutable ``{}``; since this method
        # writes into it (the 'product' unwrapping below), that shared
        # default could leak state between calls.
        if data is None:
            data = {}
        analytic_account_obj = self.pool.get('account.analytic.account')
        res_partner_obj = self.pool.get('res.partner')
        account_payment_term_obj = self.pool.get('account.payment.term')
        invoice_obj = self.pool.get('account.invoice')
        product_obj = self.pool.get('product.product')
        invoice_factor_obj = self.pool.get('hr_timesheet_invoice.factor')
        pro_price_obj = self.pool.get('product.pricelist')
        fiscal_pos_obj = self.pool.get('account.fiscal.position')
        product_uom_obj = self.pool.get('product.uom')
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoices = []
        if context is None:
            context = {}

        # A forced product arrives from the wizard as a (id, name) pair;
        # unwrap it to the bare id once, up front.  Unwrapping inside the
        # product loop (as before) raised a TypeError from the second
        # group onwards, because an int is not subscriptable.
        if data.get('product') and isinstance(data['product'], (list, tuple)):
            data['product'] = data['product'][0]

        # Collect the distinct analytic accounts of the selected lines.
        account_ids = {}
        for line in self.pool.get('account.analytic.line').browse(cr, uid, ids, context=context):
            account_ids[line.account_id.id] = True
        account_ids = account_ids.keys() #data['accounts']

        for account in analytic_account_obj.browse(cr, uid, account_ids, context=context):
            partner = account.partner_id
            if (not partner) or not (account.pricelist_id):
                raise osv.except_osv(_('Analytic Account incomplete'),
                        _('Please fill in the Partner or Customer and Sale Pricelist fields in the Analytic Account:\n%s') % (account.name,))

            if not partner.address:
                raise osv.except_osv(_('Partner incomplete'),
                        _('Please fill in the Address field in the Partner: %s.') % (partner.name,))

            # Due date = latest installment of the partner's payment term,
            # computed from today.
            date_due = False
            if partner.property_payment_term:
                pterm_list= account_payment_term_obj.compute(cr, uid,
                        partner.property_payment_term.id, value=1,
                        date_ref=time.strftime('%Y-%m-%d'))
                if pterm_list:
                    pterm_list = [line[0] for line in pterm_list]
                    pterm_list.sort()
                    date_due = pterm_list[-1]

            curr_invoice = {
                'name': time.strftime('%d/%m/%Y')+' - '+account.name,
                'partner_id': account.partner_id.id,
                'address_contact_id': res_partner_obj.address_get(cr, uid,
                    [account.partner_id.id], adr_pref=['contact'])['contact'],
                'address_invoice_id': res_partner_obj.address_get(cr, uid,
                    [account.partner_id.id], adr_pref=['invoice'])['invoice'],
                'payment_term': partner.property_payment_term.id or False,
                'account_id': partner.property_account_receivable.id,
                'currency_id': account.pricelist_id.currency_id.id,
                'date_due': date_due,
                'fiscal_position': account.partner_id.property_account_position.id
            }
            last_invoice = invoice_obj.create(cr, uid, curr_invoice, context=context)
            invoices.append(last_invoice)

            # Browse products in the partner's language so line labels and
            # notes come out translated on the invoice.
            context2 = context.copy()
            context2['lang'] = partner.lang
            # Group the selected lines of this account by product,
            # invoicing factor and unit of measure.
            cr.execute("SELECT product_id, to_invoice, sum(unit_amount), product_uom_id " \
                    "FROM account_analytic_line as line " \
                    "WHERE account_id = %s " \
                        "AND id IN %s AND to_invoice IS NOT NULL " \
                    "GROUP BY product_id,to_invoice,product_uom_id", (account.id, tuple(ids),))

            for product_id, factor_id, qty, uom in cr.fetchall():
                product = product_obj.browse(cr, uid, product_id, context2)
                if not product:
                    raise osv.except_osv(_('Error'), _('At least one line has no product !'))
                factor_name = ''
                factor = invoice_factor_obj.browse(cr, uid, factor_id, context2)

                if not data.get('product', False):
                    # Use the product recorded on the analytic lines,
                    # optionally suffixed with the factor's customer name.
                    if factor.customer_name:
                        factor_name = product.name+' - '+factor.customer_name
                    else:
                        factor_name = product.name
                else:
                    # A product was forced from the wizard: use it for the
                    # line and label the line with its display name.
                    product = product_obj.browse(cr, uid, data['product'], context=context2)
                    factor_name = product_obj.name_get(cr, uid, [data['product']], context=context)[0][1]

                ctx = context.copy()
                ctx.update({'uom':uom})

                # Unit price from the account's sale pricelist (0.0 when
                # no pricelist is set -- unreachable after the check above,
                # kept for safety).
                if account.pricelist_id:
                    pl = account.pricelist_id.id
                    price = pro_price_obj.price_get(cr,uid,[pl], data.get('product', False) or product_id, qty or 1.0, account.partner_id.id, context=ctx)[pl]
                else:
                    price = 0.0

                taxes = product.taxes_id
                tax = fiscal_pos_obj.map_tax(cr, uid, account.partner_id.property_account_position, taxes)
                account_id = product.product_tmpl_id.property_account_income.id or product.categ_id.property_account_income_categ.id
                if not account_id:
                    raise osv.except_osv(_("Configuration Error"), _("No income account defined for product '%s'") % product.name)

                curr_line = {
                    'price_unit': price,
                    'quantity': qty,
                    'discount': factor.factor,
                    # (the original dict listed 'invoice_line_tax_id'
                    # twice with the same value; kept once)
                    'invoice_line_tax_id': [(6,0,tax)],
                    'invoice_id': last_invoice,
                    'name': factor_name,
                    'product_id': data.get('product',product_id),
                    'uos_id': uom,
                    'account_id': account_id,
                    'account_analytic_id': account.id,
                }

                #
                # Compute for lines
                #
                cr.execute("SELECT * FROM account_analytic_line WHERE account_id = %s and id IN %s AND product_id=%s and to_invoice=%s ORDER BY account_analytic_line.date", (account.id, tuple(ids), product_id, factor_id))

                line_ids = cr.dictfetchall()
                note = []
                for line in line_ids:
                    # Build the per-work-entry detail shown in the note,
                    # honouring the wizard's date/time/name options.
                    details = []
                    if data.get('date', False):
                        details.append(line['date'])
                    if data.get('time', False):
                        if line['product_uom_id']:
                            details.append("%s %s" % (line['unit_amount'], product_uom_obj.browse(cr, uid, [line['product_uom_id']],context2)[0].name))
                        else:
                            details.append("%s" % (line['unit_amount'], ))
                    if data.get('name', False):
                        details.append(line['name'])
                    note.append(u' - '.join(map(lambda x: unicode(x) or '',details)))

                curr_line['note'] = "\n".join(map(lambda x: unicode(x) or '',note))
                invoice_line_obj.create(cr, uid, curr_line, context=context)
                # Link the invoiced analytic lines back to their invoice.
                cr.execute("update account_analytic_line set invoice_id=%s WHERE account_id = %s and id IN %s", (last_invoice, account.id, tuple(ids)))

            invoice_obj.button_reset_taxes(cr, uid, [last_invoice], context)
        return invoices
#
# TODO: check unit of measure !!!
#
class hr_timesheet_invoice_create(osv.osv_memory):
    """Wizard: create customer invoices from the analytic/timesheet lines
    selected in the list view. The boolean fields control which details
    (date, time spent, description, cost) appear on the invoice lines."""
    _name = 'hr.timesheet.invoice.create'
    _description = 'Create invoice from timesheet'
    _columns = {
        'date': fields.boolean('Date', help='The real date of each work will be displayed on the invoice'),
        'time': fields.boolean('Time spent', help='The time of each work done will be displayed on the invoice'),
        'name': fields.boolean('Description', help='The detail of each work done will be displayed on the invoice'),
        'price': fields.boolean('Cost', help='The cost of each work done will be displayed on the invoice. You probably don\'t want to check this'),
        'product': fields.many2one('product.product', 'Product', help='Complete this field only if you want to force to use a specific product. Keep empty to use the real product that comes from the cost.'),
    }
    # 'date' and 'name' are ticked by default (1 is truthy for boolean columns).
    _defaults = {
        'date': lambda *args: 1,
        'name': lambda *args: 1
    }
    def view_init(self, cr, uid, fields, context=None):
        """
        This function checks preconditions before the wizard executes
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param fields: List of fields for default value
        @param context: A standard dictionary for contextual values
        @raise except_osv: if any selected analytic line is already invoiced
        """
        analytic_obj = self.pool.get('account.analytic.line')
        # context['active_ids'] carries the analytic lines selected by the user.
        data = context and context.get('active_ids', [])
        for analytic in analytic_obj.browse(cr, uid, data, context=context):
            if analytic.invoice_id:
                raise osv.except_osv(_('Warning !'), _("Invoice is already linked to some of the analytic line(s)!"))
    def do_create(self, cr, uid, ids, context=None):
        """Create invoices for context['active_ids'] and return an act_window
        opening the customer-invoice tree filtered to the new invoices."""
        data = self.read(cr, uid, ids, [], context=context)[0]
        invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, context['active_ids'], data, context=context)
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        # Reuse the stock 'action_invoice_tree1' action as the base window.
        mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)[0]
        res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)['res_id']
        act_win = act_obj.read(cr, uid, res_id, [], context=context)
        # Restrict the action's domain to the invoices just created.
        act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
        act_win['name'] = _('Invoices')
        return act_win
hr_timesheet_invoice_create()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ucb-bar/bits | workloads/solr/solrcloud_firebox.py | 1 | 28477 | #! /usr/bin/env python
# Contributors:
# Zach Rowinski <zach@eecs.berkeley.edu> (2015)
from collections import defaultdict
from math import ceil
from time import sleep
import argparse
import getpass
import glob
import json
import os
import subprocess
import sys
import time
import urllib2
import atexit
import datetime
import time
# --- Cluster-wide constants and derived paths --------------------------------
DEFAULT_SOLR_PORT = 8983
USER = getpass.getuser()
# Per-user scratch locations on the shared filesystem.
HOME_DIR = '/nscratch/{}/'.format(USER)
CONF_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf/')
WORK_DIR = HOME_DIR + 'solrcloud-firebox/'
SOLR_DOCS_DIR = HOME_DIR + 'fbox-data/'
# Layout inside an unpacked Solr distribution.
SOLR_XML_DIR = 'example/solr/'
SOLR_XML = 'solr.xml'
SOLR_CONF_DIR = 'example/solr/collection1/conf/'
SOLR_CONF = 'solrconfig.xml'
SCHEMA_CONF = 'schema.xml'
DATA_CONFIG = 'data-config.xml'
HOST_CONF = 'solrcloud-hosts.conf'
# Install root on each remote node. NOTE(review): 'solar' (not 'solr') looks
# like a typo but may match the deployed layout -- confirm before renaming.
REMOTE_DIR = '/data/solar/' # or '/pcie_data/solr/'
RESULTS_DIR = '/nscratch/' + USER + '/solr-results/'
LATEST_SOLR_RES_DIR = os.path.join(RESULTS_DIR, 'latest-solr')
JMETER_TEMPLATE = 'jmeter/jmetertesting.jmx'
if not os.path.exists(RESULTS_DIR):
    os.mkdir(RESULTS_DIR)
zk_version = '3.4.6'
solr_version = '4.10.1'
# BUG FIX: the middle path segment was appended without .format(), leaving a
# literal '{version}' in the download URL; format every segment.
zk_download_url = 'http://apache.mirrors.pair.com/zookeeper/'
zk_download_url += 'zookeeper-{version}/'.format(version=zk_version)
zk_download_url += 'zookeeper-{version}.tar.gz'.format(version=zk_version)
zk_app_zip = 'zookeeper-{version}.tar.gz'.format(version=zk_version)
zk_app = 'zookeeper-{version}'.format(version=zk_version)
local_zk_zip = WORK_DIR + zk_app_zip
zk_dir = REMOTE_DIR + zk_app
zk_data_dir = os.path.join(zk_dir, 'data')
zk_conf_dir = os.path.join(zk_dir, 'conf')
solr_app_tgz = 'solr-{version}.tgz'.format(version=solr_version)
solr_app = 'solr-{version}'.format(version=solr_version)
local_solr_tgz = os.path.join(WORK_DIR, solr_app_tgz)
DNULL = open(os.devnull, 'w')
#------- Helper functions ----------------------------------------------------
class HeadRequest(urllib2.Request):
    """urllib2.Request subclass that issues an HTTP HEAD instead of GET."""
    def get_method(self):
        return 'HEAD'
#------- Zookeeper -----------------------------------------------------------
# host -> {'process': Popen} for the Zookeeper daemons launched by this run.
zk_processes = {}
def _gen_zoo_cfg(zkHosts):
# zkhosts is a list with addresses to hosts
zoo_cfg = ''
zoo_cfg += 'tickTime=2000\n'
zoo_cfg += 'initLimit=10\n'
zoo_cfg += 'syncLimit=5\n'
zoo_cfg += 'dataDir={}\n'.format(zk_data_dir)
zoo_cfg += 'clientPort=2181\n'
numHosts = len(zkHosts)
if numHosts > 1:
for z, k in enumerate(zkHosts):
zoo_cfg += 'server.%d=%s:2888:3888\n' % (z + 1, k)
return zoo_cfg
def setup_zk_ensemble(hosts):
    """Install a fresh Zookeeper ensemble on *hosts* over ssh.

    Wipes any previous install, copies and unpacks the tarball on every
    node, broadcasts a generated zoo.cfg, and writes each node's unique
    myid file (1-based, matching the server.N lines in zoo.cfg).
    """
    # Remove previous instance of app
    remote_zip = REMOTE_DIR + zk_app_zip
    print '> Removing previous copy of app...',
    for h in hosts:
        p = subprocess.Popen(['ssh', h, 'rm', '-rf', zk_dir, remote_zip])
        p.wait()
    print 'Done'
    # Copy zk over and unpack it
    print '> Copying Zookeeper'
    for h in hosts:
        subprocess.call(['ssh', h, 'cp', local_zk_zip, remote_zip])
        # shell=True so the '&>' redirection silences tar's verbose output.
        subprocess.call(' '.join(['ssh', h, 'tar', 'xfzv', remote_zip,
                                  '-C', REMOTE_DIR,
                                  '&>', '/dev/null']), shell=True)
    # Generate conf and broadcast to nodes
    print '> Generating Zookeeper conf files'
    for h in hosts:
        subprocess.call(['ssh', h, 'mkdir', zk_data_dir])
    conf = _gen_zoo_cfg(hosts)
    zoocfg = os.path.join(os.path.dirname(os.path.abspath(__file__)), \
                          'zoo.cfg')
    open(zoocfg, 'w').write(conf)
    for h in hosts:
        subprocess.call(['ssh', h, 'cp', zoocfg, zk_conf_dir])
    # Each server's myid must be unique and match its server.N index.
    for n, h in enumerate(hosts):
        subprocess.call('echo %d | ssh %s "cat > %s"' % (n + 1, \
                        h, zk_data_dir + '/myid'), shell=True)
        subprocess.call(['ssh', h, 'rm', '-f', remote_zip])
def start_zk_ensemble(zk_hosts):
    """Start zkServer.sh on every ensemble node, then poll until all answer."""
    for h in zk_hosts:
        print '> Starting Zookeeper host on {}'.format(h)
        srun_cmd = ' '.join(['ssh', '-t', h])  # for each node...
        # Start each zk instance from inside its bin/ directory.
        srun_cmd2 = ' '.join(['cd', '{};'.format(os.path.join(zk_dir, 'bin')),\
                              os.path.join(zk_dir, 'bin/zkServer.sh'),\
                              'start'])
        srun_cmd2 = '"' + srun_cmd2 + '"'
        p = subprocess.Popen(' '.join([srun_cmd, srun_cmd2]), shell=True)
        zk_processes[h] = {'process': p}
        p.wait()
    sleep(2)
    check_instances_running(zk_hosts, 'zk')
def stop_zk_ensemble():
print '> Stopping Zookeeper ensemble'
zk_hosts = get_hosts()['zk_hosts']
# Kill all zookeeper processes started by current user
for h in zk_hosts:
subprocess.call(['ssh', h, 'pkill', '-f', '-U', USER,
'zookeeper'], stderr=DNULL)
def _check_zk_instance(host):
    """True iff a Zookeeper server answers the 'srvr' four-letter command."""
    cmd = 'echo srvr | nc {} 2181'.format(host)
    reply = subprocess.check_output(cmd, shell=True)
    return reply is not None and reply.lower().startswith('zookeeper version')
def _zk_host_str():
    """Comma-separated 'host:2181' connect string for the whole ensemble."""
    return ','.join('{}:2181'.format(h) for h in get_hosts()['zk_hosts'])
#------- Solr ----------------------------------------------------------------
# host -> {port: {'process': Popen}} for the Solr JVMs launched by this run.
solr_processes = defaultdict(dict)
def _check_solr_running(instance):
    """True iff the Solr admin UI at *instance* ('host:port') answers a
    HEAD request. Any failure (refused, timeout, DNS) counts as not running."""
    try:
        urllib2.urlopen(HeadRequest('http://%s/solr/#/' % instance))
    except:
        return False
    return True
def check_instances_running(instances, role):
    """Poll until every instance of *role* ('solr' or 'zk') responds.

    Backs off exponentially (1, 2, 4, ... seconds between polls) and gives
    up once max_time seconds have been slept in total. Returns True when
    all instances answered, False on timeout.
    """
    if role == 'solr':
        check_func = _check_solr_running
    elif role == 'zk':
        check_func = _check_zk_instance
    all_running = False
    max_time = 255 # seconds
    elapsed_time = 0
    i = 0
    while not all_running:
        if elapsed_time >= max_time:
            print '> Time expired for {} check.'.format(role)
            print '> Not all {} instances are running.'.format(role)
            return all_running
        all_running = all(map(check_func, instances))
        wait_time = 2 ** i # back off exponentially
        i += 1
        if not all_running:
            print '> Waiting for instances to start.',
            print 'Will check again in {} seconds'.format(wait_time)
            time.sleep(wait_time)
            elapsed_time += wait_time
    print '> All {} instances are running'.format(role)
    return all_running
def _install_new_solr_instance(host, cur_id, remote_zip):
    """Unpack a pristine Solr under REMOTE_DIR/<cur_id> on *host* and push
    the local schema / solrconfig / data-config files into its conf dir.

    cur_id doubles as the instance's jetty port, so each instance lives in
    its own numbered directory on the node.
    """
    cur_dir = os.path.join(REMOTE_DIR, str(cur_id))
    cur_solr_dir = os.path.join(cur_dir, solr_app)
    srun_cmd = ['ssh', host]
    srun_cmd1 = ['rm', '-rf', cur_dir] # Remove existing dir
    srun_cmd2 = ['mkdir', cur_dir] # Make new dir
    srun_cmd3 = ['tar', 'xzfv', remote_zip, '-C', cur_dir, '&>',
                 '/dev/null']
    # srun_cmd3 = ['tar', 'xzfv', remote_zip, '-C', cur_dir]
    srun_cmd4 = ['cp', CONF_DIR + SCHEMA_CONF, os.path.join(
        cur_solr_dir, os.path.join(SOLR_CONF_DIR, SCHEMA_CONF))]
    srun_cmd5 = ['cp', CONF_DIR + SOLR_CONF, os.path.join(
        cur_solr_dir, os.path.join(SOLR_CONF_DIR, SOLR_CONF))]
    srun_cmd6 = ['cp', CONF_DIR + DATA_CONFIG, os.path.join(
        cur_solr_dir, os.path.join(SOLR_CONF_DIR, DATA_CONFIG))]
    # Remove previous solr dir
    subprocess.call(srun_cmd + srun_cmd1)
    # Create new dir, unzip new solr instance into it
    subprocess.call(srun_cmd + srun_cmd2)
    # shell=True so the '&>' redirection silences tar's verbose output.
    subprocess.call(' '.join(srun_cmd + srun_cmd3), shell=True)
    # Copy config file to new instance
    subprocess.call(srun_cmd + srun_cmd4)
    subprocess.call(srun_cmd + srun_cmd5)
    subprocess.call(srun_cmd + srun_cmd6)
def setup_solr_instances(hosts, n_instances, install_new=True):
    '''Setup solr instances. Copy a version of solr to nodes in a round
    robin fashion. If install_new = False, just return the nodes assignments
    (host, port) of each instance.

    Returns a defaultdict mapping host -> list of jetty ports, with ports
    allocated sequentially starting at DEFAULT_SOLR_PORT.
    '''
    remote_zip = os.path.join(REMOTE_DIR, solr_app_tgz)
    cur_dir_id = DEFAULT_SOLR_PORT
    # Broadcast a copy of Solr to all the nodes
    if install_new:
        for h in hosts:
            subprocess.call(['ssh', h, 'cp', local_solr_tgz, remote_zip])
    # Add hosts to nodes in a round-robin manner
    rounds = int(ceil(n_instances / float(len(hosts))))
    added = 0
    instance_ports = defaultdict(list)
    print '> Setting up Solr instances...'
    for r in range(rounds):
        for n, h in enumerate(hosts):
            if added + 1 > n_instances:
                break
            if install_new:
                print '> Setting up Solr instance {} on host {}:{}'.format(
                    added + 1, h, cur_dir_id)
                _install_new_solr_instance(h, cur_dir_id, remote_zip)
            instance_ports[h].append(cur_dir_id)
            added += 1
            # The port number is also the remote install directory name.
            cur_dir_id += 1
    return instance_ports
def _start_instances(instance_hosts, n_shards, zk_hosts_str=None):
if not zk_hosts_str:
zk_hosts_str = _zk_host_str()
all_instances = []
for h in instance_hosts:
srun_cmd = ' '.join(['ssh', '-f', '-n', h])
for port in instance_hosts[h]:
# Collection host:port string of all instances
all_instances.append('{host}:{port}'.format(host=h, port=port))
# increment jmx port with the initial solr port (8983)
jmx_port = 9010 + int(port) - DEFAULT_SOLR_PORT
# cd into the solr instance dir that will be run
cur_dir = os.path.join(str(port), solr_app + '/example')
solr_dir = os.path.join(REMOTE_DIR, cur_dir)
srun_cmd2 = ' '.join(['cd', solr_dir + ';',
'nohup', 'java',
'-XX:+UseConcMarkSweepGC',
# '-XX:+UseSerialGC',
# '-XX:+UseParallelGC',
# '-XX:+UseG1GC',
'-XX:+PrintGCApplicationStoppedTime',
'-Xmx10g',
'-DnumShards=' + str(n_shards),
'-Dbootstrap_confdir=./solr/collection1/conf',
'-Dcollection.configName=myconf',
'-Djetty.port=' + str(port),
'-DzkHost=' + zk_hosts_str,
'-Dhttp.maxConnections=10',
'-Dcom.sun.management.jmxremote',
'-Dcom.sun.management.jmxremote.port=' + str(jmx_port),
'-Dcom.sun.management.jmxremote.local.only=false',
'-Dcom.sun.management.jmxremote.authenticate=false',
'-Dcom.sun.management.jmxremote.ssl=false', '-jar',
os.path.join(solr_dir, 'start.jar'),
'>', '/dev/null', '2>&1', '&'])
srun_cmd2 = '"sh -c \'' + srun_cmd2 + '\'"'
print '> Starting solr instance at {}:{}'.format(h, port)
p = subprocess.Popen(' ' .join([srun_cmd, srun_cmd2]),
shell=True)
p.wait()
solr_processes[h][port] = {'process': p}
print '> Waiting for Solr instances to start...'
time.sleep(2)
if not check_instances_running(all_instances, 'solr'):
print '[ERROR] Failed to start all instances in the allotted time'
print '[ERROR] Check /{}/example/logs for more information'.format(
solr_version)
sys.exit(0)
def run_solr_instances(instance_hosts, zk_hosts, n_shards):
    """Start the given Solr instances against the Zookeeper ensemble.

    NOTE(review): zk_hosts is accepted but ignored -- the connect string is
    always rebuilt from get_hosts() via _zk_host_str(). Confirm no caller
    relies on passing a different ensemble here.
    """
    zk_hosts_str = _zk_host_str()
    _start_instances(instance_hosts, n_shards, zk_hosts_str)
def stop_solr():
hosts = get_hosts()['solr_hosts']
for host in hosts:
print '> Stopping Solr instances on', host
cur_dir = os.path.join(str(DEFAULT_SOLR_PORT), solr_app + '/bin')
solr_dir = os.path.join(REMOTE_DIR, cur_dir)
subprocess.call(['ssh', host, 'pkill', '-f',
'start.jar', '-U', USER],
stderr=DNULL)
def restart_solr_instances(n_instances=3, n_shards=3):
    '''Restart Solr instances from a previous session. It is important that
    the n_instances and n_shards parameters are set to the exact same values
    as they were when Solr was last run. Zookeeper must be running.'''
    stop_solr()
    # Give the killed JVMs a moment to release their ports.
    time.sleep(5)
    all_hosts = get_hosts()
    solr_hosts = all_hosts['solr_hosts']
    zk_hosts = all_hosts['zk_hosts']
    if not check_instances_running(zk_hosts, 'zk'):
        print '[ERROR] Zookeeper must be running in order to restart Solr'
        print '[ERROR] Exiting...'
        return
    zk_host_str = _zk_host_str()
    print '> Restarting solr instances with zk servers: {}'.format(zk_host_str)
    # install_new=False: only recompute the host/port assignments.
    instances = setup_solr_instances(solr_hosts, n_instances, \
                                     install_new=False)
    _start_instances(instances, n_shards, zk_host_str)
def add_solr_instances(n_instances, n_shards):
    '''Add instances to an existing, healthy* cluster.
    *No nodes with indexed data should be down.

    n_instances: resulting TOTAL number of instances in the cluster,
        including any already running.
    n_shards: shard count passed to the newly started instances (must match
        the value the cluster was created with).
    '''
    remote_zip = os.path.join(REMOTE_DIR, solr_app_tgz)
    hosts = get_hosts()['solr_hosts']
    # Get a list of all instances (hosts:ports) that are already running
    # or are being added
    all_instances = setup_solr_instances(hosts, n_instances, install_new=False)
    # Collect the names of the instances that need to be newly created
    instances_to_create = defaultdict(list)
    for host in all_instances:
        for port in all_instances[host]:
            instance = ':'.join([host, str(port)])
            if not _check_solr_running(instance):
                instances_to_create[host].append(port)
                _install_new_solr_instance(host, str(port), remote_zip)
    # Start just the new instances.
    # BUG FIX: the shard count was hard-coded to 3 here, silently ignoring
    # the n_shards argument; pass it through.
    _start_instances(instances_to_create, n_shards)
### Optional: Run nginx as a load-balancer
def setup_nginx(n_instances):
    '''Setup nginx on fbox. This must be called before running run_test.

    Writes a site config that listens on port 7777 and round-robins to
    every running Solr instance, enables it, and restarts nginx.
    Requires sudo.
    '''
    # Build the site-available conf file from the list of running instances
    hosts = get_hosts()['solr_hosts']
    all_instances = setup_solr_instances(hosts, n_instances, install_new=False)
    running_hosts = []
    for k in all_instances:
        for v in all_instances[k]:
            running_hosts.append(
                ' server {host}:{port};'.format(host=k,port=v))
    conf = '''
    server {
        listen 7777;
        server_name fbox;
        location / {
            proxy_pass http://backend;
        }
    }
    upstream backend {
    %s
    }''' % '\n'.join(running_hosts)
    # Write site-available conf to tmp and then copy / sym-link appropriately
    conf_file = '/tmp/fboxsolr'
    dest = '/etc/nginx/sites-available/fboxsolr'
    sites_enabled = '/etc/nginx/sites-enabled/fboxsolr'
    open(conf_file, 'w').write(conf)
    subprocess.call(['sudo', 'mv', conf_file, \
                     '/etc/nginx/sites-available/fboxsolr'])
    if not os.path.exists(sites_enabled):
        subprocess.call(['sudo', 'ln', '-s', dest, \
                         sites_enabled])
    subprocess.call(['sudo', 'service', 'nginx', 'restart'])
#------- Add / Query documents -----------------------------------------------
def submit_doc(url):
    """Fetch *url* with curl; returns curl's exit status."""
    cmd = 'curl ' + url
    return subprocess.call(cmd, shell=True)
def index_sample_documents(dir=SOLR_DOCS_DIR):
'''Index the complete (200GB) of synthetic documents generated from Google
n-grams'''
print '> Indexing sample documents. Warning: this may take a few hours'
docs = []
hosts = get_hosts()['solr_hosts']
target = "http://{}:8983/solr/update".format(hosts[0])
params = '?stream.file={}&stream.contentType=application/json;charset=utf-8'
params += '&commit=true'
jsn_docs = glob.glob(os.path.join(SOLR_DOCS_DIR, '*json'))
jsn_docs.sort()
for d in jsn_docs:
print '> Submitting {} for indexing ... '.format(d),
res = urllib2.urlopen(target+params.format(d))
if res.getcode() != 200:
print '> There was an error indexing file {}'.format(d)
continue
else:
print '> Finished'
def test_query():
host = get_hosts()['solr_hosts'][0] # Pick first solr node off list
url = '"http://{host}:8983'.format(host=host)
print '> Submitting test query to {}:8983'.format(host)
url += '/solr/select?df=text&fl=id&q=computer+science"'
subprocess.call('curl ' + url, shell=True)
def run_test(duration=180, users=100, qps=400):
    '''Run a benchmark test using jmeter.

    duration: int - time in seconds of the experiment
    users: int - number of threads java will use
    qps: int - number of queries to run in one second

    Results land in a timestamped directory under RESULTS_DIR; the
    'latest-solr' symlink always points at the most recent run. The
    node's Java/system settings are snapshotted alongside for provenance.
    '''
    rate = qps * 60 # jmeter uses queries per minute
    run_dir = 'solr-' + datetime.datetime.fromtimestamp(time.time()). \
        strftime('%Y-%m-%d-%H-%M-%S')
    run_dir = os.path.join(RESULTS_DIR, run_dir)
    os.mkdir(run_dir)
    # -f -T: atomically repoint the 'latest' symlink at the new run dir.
    subprocess.call(['ln', '-s', '-f', '-T', run_dir, LATEST_SOLR_RES_DIR])
    # Fill the jmeter template's %-placeholders: users, duration, rate, user.
    c = open(JMETER_TEMPLATE, "r").read()
    c = c % (users, duration, rate, USER)
    jmeter_path = os.path.join(LATEST_SOLR_RES_DIR, 'jmeter_run.jmx')
    system_settings_path = os.path.join(LATEST_SOLR_RES_DIR, 'system.json')
    with open(jmeter_path, 'w') as f:
        f.write(c)
    print '> Saving system settings'
    solr_hosts = get_hosts()['solr_hosts']
    solr_hosts.sort()
    host = solr_hosts[0]
    try:
        res = urllib2.urlopen('http://{}.millennium.berkeley.edu:8983/{}'.format(\
            host, 'solr/admin/system?wt=json&indent=true'))
        if res.getcode() == 200:
            jsn = res.read()
            with open(system_settings_path, 'w') as f:
                f.write(jsn)
    except:
        # Best-effort snapshot: a failure here should not abort the benchmark.
        import traceback;traceback.print_exc()
        print '[ERROR] Problem saving Java run-time settings'
    print '> Starting benchmark test'
    p = subprocess.call(['jmeter', '-n', '-t', jmeter_path])
def load_wiki_documents():
    '''Use Solr's Data Import Handler to load in a set of Wikipedia documents.

    Triggers a full-import on the first Solr node and returns the status
    URL that _check_import_handler() can poll; the import itself runs
    asynchronously inside Solr.
    '''
    solr_hosts = get_hosts()['solr_hosts']
    solr_hosts.sort()
    host = solr_hosts[0]
    base_url = 'http://{host}.millennium.berkeley.edu:8983/solr'.\
        format(host=host)
    progress_url = base_url + '/#/collection1/dataimport//dataimport'
    import_url = base_url + '/collection1/dataimport?command=full-import'
    status_url = base_url + '/collection1/dataimport?command=status&wt=json'
    res = urllib2.urlopen(import_url)
    if res.getcode() != 200:
        print '[ERROR] There was a problem accessing {}'.format(import_url)
    else:
        print '> Starting to index the Wikipedia collection. This may take up \
    to an hour to complete.'
        print '> To view progress, visit {}'.format(progress_url)
    return status_url
#------- High level functions
def run_demo(num_shards=3, n_instances=3):
    '''Run a demonstration of all the steps needed to setup a SolrCloud cluster
    and populate it with data.

    End to end: install + start Zookeeper, install + start Solr, then
    optionally kick off the (hours-long) sample indexing job.
    '''
    hosts = get_hosts()
    zk_hosts = hosts['zk_hosts']
    solr_hosts = hosts['solr_hosts']
    setup_zk_ensemble(zk_hosts)
    start_zk_ensemble(zk_hosts)
    solr_hosts = setup_solr_instances(solr_hosts, n_instances,
                                      install_new=True)
    run_solr_instances(solr_hosts, zk_hosts=zk_hosts, n_shards=num_shards)
    subprocess.call(['sleep', '3'])
    index_prompt = raw_input('''Would you like to start indexing the sample
    collection (/nscratch/zach/fbox-data)? Warning: indexing may take a few
    hours [Y/n]''')
    if index_prompt.upper() == 'Y':
        index_sample_documents()
def setup(num_shards=3, n_instances=3, collection='wikipedia'):
    '''Setup nodes for a fixed number of shards and instances.

    NOTE(review): num_shards and collection are currently unused here --
    sharding and collection loading only take effect in start(). Kept for
    interface symmetry with start(); confirm before removing.
    '''
    hosts = get_hosts()
    zk_hosts = hosts['zk_hosts']
    solr_hosts = hosts['solr_hosts']
    setup_zk_ensemble(zk_hosts)
    solr_hosts = setup_solr_instances(solr_hosts, n_instances,
                                      install_new=True)
def start(num_shards=3, n_instances=3, collection='wikipedia'):
    '''Start the solr and zookeeper nodes. Begin indexing the collection
    specified in the collection argument.

    Blocks forever once the cluster is up; everything is torn down via the
    atexit hook when this foreground process is terminated.

    NOTE(review): with collection != 'wikipedia', status_url is never
    assigned and the polling loop below would raise NameError -- confirm
    intended behaviour before adding other collections.
    '''
    hosts = get_hosts()
    zk_hosts = hosts['zk_hosts']
    solr_hosts = hosts['solr_hosts']
    start_zk_ensemble(zk_hosts)
    solr_hosts = setup_solr_instances(solr_hosts, n_instances,
                                      install_new=False)
    run_solr_instances(solr_hosts, zk_hosts=zk_hosts, n_shards=num_shards)
    subprocess.call(['sleep', '3'])
    #TODO; add other collection options
    if collection == 'wikipedia':
        status_url = load_wiki_documents()
    # Ensure Solr and Zookeeper are stopped when this process exits.
    atexit.register(stop_everything)
    uploaded = False
    while not uploaded:
        uploaded = _check_import_handler(status_url)
        time.sleep(30)
    print '> Done uploading collection'
    print '> The SolrCloud cluster is up. Terminate this process to shutdown \
    the Solr and Zookeeper instances.'
    while True:
        time.sleep(1)
def _check_import_handler(url):
res = urllib2.urlopen(url)
if res.getcode() == 200:
data = res.read()
data = json.loads(data)
if data.get('status', '') != 'busy':
return True
else:
return False
else:
print'> Error contacting server at', url
return False
# Get hosts for current setup
def _read_host_conf(conf):
'''The solrcloud-hosts.conf is a tab-delimited list of hosts and their
roles. Below is an example of a 5-node allocation with 3 solr nodes and
2 Zookeeper nodes:
f1\tsolr
f2\tsolr
f3\tzk
f4\tzk
f5\tzk
'''
conf = open(conf)
roles = defaultdict(list)
for line in conf:
host, role = line.strip().split('\t')
roles[role].append(host)
return roles
def get_hosts():
'''Unless otherwise specified in the
solrcloud-hosts.conf file, assume first 3 nodes are dedicated
to the Zookeeper servers and that the Solr instances are assigned to the
remaining available nodes. If only 3 nodes are allocated, Zookeeper
and Solr share the nodes.
'''
nhosts = len(hosts)
if nhosts < 3:
raise ValueError, 'Insufficient number of nodes. Minimum 3 required'
if os.path.exists(HOST_CONF):
roles = _read_host_conf(HOST_CONF)
zk_hosts = [h for h in roles['zk']]
solr_hosts = [h for h in roles['solr']]
else:
hosts.sort()
zk_hosts = hosts[:3]
if nhosts == 3:
solr_hosts = zk_hosts[:]
else:
solr_hosts = hosts[3:]
return {'solr_hosts': solr_hosts, 'zk_hosts': zk_hosts}
def get_live_nodes():
'''Returns a list ({host}:{port}) of all the running Solr nodes'''
solr_hosts = get_hosts()['solr_hosts']
solr_hosts.sort()
host = solr_hosts[0] # Pick first solr node off list
url = 'http://{host}:{port}'.format(host=host, port=DEFAULT_SOLR_PORT)
url += '/solr/zookeeper?path=/live_nodes'
err_msg = '[ERROR] Solr and Zookeeper must be running in order to \
perform this operation'
nodes = []
try:
res = urllib2.urlopen(url)
except:
print err_msg
return
if res.getcode() == 200:
jsn = json.loads(res.read())
children = [n['data']['title'] for n in jsn['tree'][0]['children']]
nodes = [c.replace('_solr', '') for c in children]
nodes.sort()
internal_hosts = [n.split(':')[0] for n in nodes]
host_map = {}
# Map solr fbox node names to internal (high-bandwidth) ip addresses
i = 0
slr_inx = 0
while (len(host_map) < len(solr_hosts)) and i < len(nodes):
h = internal_hosts[i]
if h not in host_map:
host_map[h] = solr_hosts[slr_inx]
slr_inx += 1
i+=1
# Rename hosts by replacing internal IP addresses with f1,f2, etc names
for i, h in enumerate(internal_hosts):
nodes[i] = nodes[i].replace(h, host_map[h])
else:
print error_msg
return
return nodes
def stop_everything():
    """Shut down all Solr instances and the whole Zookeeper ensemble."""
    print '> Stopping all processes'
    stop_solr()
    stop_zk_ensemble()
def terminate_session():
'''Stop all services and remove all Solr data'''
nodes = get_live_nodes()
stop_solr()
stop_zk_ensemble()
print '> Removing Solr data from nodes'
for n in nodes:
host, port = n.split(':')
subprocess.call(['ssh', host, 'rm', '-rf', \
os.path.join(REMOTE_DIR, port)])
# Finally, remove any remaining Solr archive files
solr_archive = os.path.join(REMOTE_DIR, solr_app_tgz)
for h in get_hosts()['solr_hosts']:
subprocess.call(['ssh', h, 'rm', '-rf', \
solr_archive, zk_dir])
print '> Finished'
#------- Commands ------------------------------------------------------------
parser = argparse.ArgumentParser(
description='Setup SolrCloud to run on Firebox-0.')
parser.add_argument('action',
help='''Available actions:
setup-zk
start-zk
stop-zk
check-zk-health
setup-solr
start-solr
stop-solr
restart-solr
get-live-nodes
setup
start
stop
run-test
run-demo
index-samples
load-wikipedia
setup-nginx
test-query
terminate-session
''')
parser.add_argument('--instances', type=int, default=3, \
help='The number of solr instances to setup/run. default=3')
parser.add_argument('--shards', type=int, default=3, \
help='The number of shards in the collection. default=3')
parser.add_argument('--qps', type=int, default=400, \
help='Queries per second for the "run-test" option')
parser.add_argument('--duration', type=int, default=180, \
help='Duration in seconds of the "run-test" option')
args = parser.parse_args()
print '> COMMAND = ' + str(args.action)
num_shards = args.shards
n_instances = args.instances
qps = args.qps
duration = args.duration
# ## ZK and Solr hosts
all_hosts = get_hosts()
solr_hosts = all_hosts['solr_hosts']
zk_hosts = all_hosts['zk_hosts']
if args.action == 'setup-zk':
setup_zk_ensemble(zk_hosts)
elif args.action == 'start-zk':
start_zk_ensemble(zk_hosts)
elif args.action == 'stop-zk':
stop_zk_ensemble()
elif args.action == 'check-zk-health':
check_instances_running(zk_hosts, 'zk')
elif args.action == 'setup-solr':
solr_instances = setup_solr_instances(solr_hosts, n_instances=n_instances,
install_new=True)
elif args.action == 'start-solr':
solr_hosts = setup_solr_instances(solr_hosts, n_instances,
install_new=False)
run_solr_instances(solr_hosts, zk_hosts=zk_hosts, n_shards=num_shards)
elif args.action == 'run-demo':
run_demo(num_shards=num_shards, n_instances=n_instances)
elif args.action == 'start':
start(num_shards=num_shards, n_instances=n_instances)
elif args.action == 'setup':
setup(num_shards=num_shards, n_instances=n_instances)
elif args.action == 'stop':
stop_everything()
elif args.action == 'run-test':
run_test(duration=duration, qps=qps)
elif args.action == 'stop-solr':
stop_solr()
elif args.action == 'restart-solr':
restart_solr_instances(n_instances=n_instances, n_shards=num_shards)
elif args.action == 'add-solr-instances':
# n_instance is the resultant, total number you want in the cluster
# including any instances already instantiated and started
add_solr_instances(n_instances, num_shards)
elif args.action == 'index-samples':
index_sample_documents()
elif args.action == 'load-wikipedia':
load_wiki_documents()
elif args.action == 'setup-nginx':
setup_nginx(n_instances)
elif args.action == 'test-query':
test_query()
elif args.action == 'get-live-nodes':
print 'Live Solr instances:', get_live_nodes()
elif args.action == 'terminate-session':
terminate_session()
else:
print '[ERROR] Unknown action \'' + args.action[0] + '\''
| apache-2.0 |
Coelhon/MasterRepo.repository | plugin.video.youtube/resources/lib/kodion/impl/xbmc/xbmc_context.py | 7 | 9558 | import sys
import urllib
import urlparse
import weakref
import datetime
import json
import xbmc
import xbmcaddon
import xbmcplugin
import xbmcvfs
from ..abstract_context import AbstractContext
from .xbmc_plugin_settings import XbmcPluginSettings
from .xbmc_context_ui import XbmcContextUI
from .xbmc_system_version import XbmcSystemVersion
from .xbmc_playlist import XbmcPlaylist
from .xbmc_player import XbmcPlayer
from ... import utils
class XbmcContext(AbstractContext):
def __init__(self, path='/', params=None, plugin_name=u'', plugin_id=u'', override=True):
AbstractContext.__init__(self, path, params, plugin_name, plugin_id)
if plugin_id:
self._addon = xbmcaddon.Addon(id=plugin_id)
else:
self._addon = xbmcaddon.Addon()
pass
self._system_version = None
"""
I don't know what xbmc/kodi is doing with a simple uri, but we have to extract the information from the
sys parameters and re-build our clean uri.
Also we extract the path and parameters - man, that would be so simple with the normal url-parsing routines.
"""
# first the path of the uri
if override:
self._uri = sys.argv[0]
comps = urlparse.urlparse(self._uri)
self._path = urllib.unquote(comps.path).decode('utf-8')
# after that try to get the params
if len(sys.argv) > 2:
params = sys.argv[2][1:]
if len(params) > 0:
self._uri = self._uri + '?' + params
self._params = {}
params = dict(urlparse.parse_qsl(params))
for _param in params:
item = params[_param]
self._params[_param] = item.decode('utf-8')
pass
pass
pass
self._ui = None
self._video_playlist = None
self._audio_playlist = None
self._video_player = None
self._audio_player = None
self._plugin_handle = int(sys.argv[1]) if len(sys.argv) > 1 else None
self._plugin_id = plugin_id or self._addon.getAddonInfo('id')
self._plugin_name = plugin_name or self._addon.getAddonInfo('name')
self._version = self._addon.getAddonInfo('version')
self._native_path = xbmc.translatePath(self._addon.getAddonInfo('path'))
self._settings = XbmcPluginSettings(self._addon)
"""
Set the data path for this addon and create the folder
"""
self._data_path = xbmc.translatePath('special://profile/addon_data/%s' % self._plugin_id)
if isinstance(self._data_path, str):
self._data_path = self._data_path.decode('utf-8')
pass
if not xbmcvfs.exists(self._data_path):
xbmcvfs.mkdir(self._data_path)
pass
pass
def format_date_short(self, date_obj):
date_format = xbmc.getRegion('dateshort')
_date_obj = date_obj
if isinstance(_date_obj, datetime.date):
_date_obj = datetime.datetime(_date_obj.year, _date_obj.month, _date_obj.day)
pass
return _date_obj.strftime(date_format)
def format_time(self, time_obj):
time_format = xbmc.getRegion('time')
_time_obj = time_obj
if isinstance(_time_obj, datetime.time):
_time_obj = datetime.time(_time_obj.hour, _time_obj.minute, _time_obj.second)
pass
return _time_obj.strftime(time_format)
def get_language(self):
"""
The xbmc.getLanguage() method is fucked up!!! We always return 'en-US' for now
"""
return 'en-US'
"""
if self.get_system_version().get_release_name() == 'Frodo':
return 'en-US'
try:
language = xbmc.getLanguage(0, region=True)
language = language.split('-')
language = '%s-%s' % (language[0].lower(), language[1].upper())
return language
except Exception, ex:
self.log_error('Failed to get system language (%s)', ex.__str__())
return 'en-US'
pass
"""
def get_system_version(self):
if not self._system_version:
self._system_version = XbmcSystemVersion(version='', releasename='', appname='')
pass
return self._system_version
def get_video_playlist(self):
if not self._video_playlist:
self._video_playlist = XbmcPlaylist('video', weakref.proxy(self))
pass
return self._video_playlist
def get_audio_playlist(self):
if not self._audio_playlist:
self._audio_playlist = XbmcPlaylist('audio', weakref.proxy(self))
pass
return self._audio_playlist
def get_video_player(self):
if not self._video_player:
self._video_player = XbmcPlayer('video', weakref.proxy(self))
pass
return self._video_player
def get_audio_player(self):
if not self._audio_player:
self._audio_player = XbmcPlayer('audio', weakref.proxy(self))
pass
return self._audio_player
def get_ui(self):
if not self._ui:
self._ui = XbmcContextUI(self._addon, weakref.proxy(self))
pass
return self._ui
def get_handle(self):
return self._plugin_handle
def get_data_path(self):
return self._data_path
def get_native_path(self):
return self._native_path
def get_settings(self):
return self._settings
def localize(self, text_id, default_text=u''):
if isinstance(text_id, int):
"""
We want to use all localization strings!
Addons should only use the range 30000 thru 30999 (see: http://kodi.wiki/view/Language_support) but we
do it anyway. I want some of the localized strings for the views of a skin.
"""
if text_id >= 0 and (text_id < 30000 or text_id > 30999):
result = xbmc.getLocalizedString(text_id)
if result is not None and result:
return utils.to_unicode(result)
pass
pass
result = self._addon.getLocalizedString(int(text_id))
if result is not None and result:
return utils.to_unicode(result)
return utils.to_unicode(default_text)
def set_content_type(self, content_type):
self.log_debug('Setting content-type: "%s" for "%s"' % (content_type, self.get_path()))
xbmcplugin.setContent(self._plugin_handle, content_type)
pass
def add_sort_method(self, *sort_methods):
for sort_method in sort_methods:
xbmcplugin.addSortMethod(self._plugin_handle, sort_method)
pass
pass
def clone(self, new_path=None, new_params=None):
if not new_path:
new_path = self.get_path()
pass
if not new_params:
new_params = self.get_params()
pass
new_context = XbmcContext(path=new_path, params=new_params, plugin_name=self._plugin_name,
plugin_id=self._plugin_id, override=False)
new_context._function_cache = self._function_cache
new_context._search_history = self._search_history
new_context._favorite_list = self._favorite_list
new_context._watch_later_list = self._watch_later_list
new_context._access_manager = self._access_manager
new_context._ui = self._ui
new_context._video_playlist = self._video_playlist
new_context._video_player = self._video_player
return new_context
def execute(self, command):
    # Run a Kodi built-in command string (fire and forget).
    xbmc.executebuiltin(command)
    pass

def sleep(self, milli_seconds):
    # Delegate to Kodi's sleep, which is add-on friendly.
    xbmc.sleep(milli_seconds)
    pass
def addon_enabled(self, addon_id):
    """Return True if the add-on *addon_id* is installed and enabled.

    Uses the JSON-RPC call ``Addons.GetAddonDetails``; any error response
    is logged at debug level and reported as False.
    """
    rpc_request = json.dumps({"jsonrpc": "2.0",
                              "method": "Addons.GetAddonDetails",
                              "id": 1,
                              "params": {"addonid": "%s" % addon_id,
                                         "properties": ["enabled"]}
                              })
    response = json.loads(xbmc.executeJSONRPC(rpc_request))
    try:
        return response['result']['addon']['enabled'] is True
    except KeyError:
        # The failure response may lack an 'error' payload entirely, so be
        # defensive here instead of raising a second KeyError.
        error_info = response.get('error', {})
        message = error_info.get('message', 'Unknown error')
        code = error_info.get('code', 'Unknown code')
        error = 'Requested |%s| received error |%s| and code: |%s|' % (rpc_request, message, code)
        xbmc.log(error, xbmc.LOGDEBUG)
        return False
def set_addon_enabled(self, addon_id, enabled=True):
    """Enable or disable the add-on *addon_id* via JSON-RPC.

    Returns True when Kodi acknowledges the change with 'OK'; any error
    response is logged at debug level and reported as False.
    """
    rpc_request = json.dumps({"jsonrpc": "2.0",
                              "method": "Addons.SetAddonEnabled",
                              "id": 1,
                              "params": {"addonid": "%s" % addon_id,
                                         "enabled": enabled}
                              })
    response = json.loads(xbmc.executeJSONRPC(rpc_request))
    try:
        return response['result'] == 'OK'
    except KeyError:
        # The failure response may lack an 'error' payload entirely, so be
        # defensive here instead of raising a second KeyError.
        error_info = response.get('error', {})
        message = error_info.get('message', 'Unknown error')
        code = error_info.get('code', 'Unknown code')
        error = 'Requested |%s| received error |%s| and code: |%s|' % (rpc_request, message, code)
        xbmc.log(error, xbmc.LOGDEBUG)
        return False
| gpl-2.0 |
ujdhesa/unisubs | apps/videos/types/youtube.py | 1 | 27243 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import logging
import re
from urlparse import urlparse
import babelsubs
import requests
import time
import gdata.youtube.client
from gdata.youtube.client import YouTubeError
import httplib
import httplib2
from celery.task import task
from django.conf import settings
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from gdata.service import RequestError
from gdata.youtube.service import YouTubeService
from lxml import etree
from base import VideoType, VideoTypeError
from utils.translation import SUPPORTED_LANGUAGE_CODES
from utils.metrics import Meter, Occurrence
from unilangs import LanguageCode
logger = logging.getLogger("youtube")
YOUTUBE_API_SECRET = getattr(settings, "YOUTUBE_API_SECRET", None)
YOUTUBE_ALWAYS_PUSH_USERNAME = getattr(settings,
'YOUTUBE_ALWAYS_PUSH_USERNAME', None)
# Calling ugettext_lazy on these literals registers them with the translation
# machinery (message extraction) even though the return values are discarded.
_('Private video')
_('Undefined error')

# Marker for subtitle versions that originate from Youtube (compare the
# note="From youtube" used when importing subtitles elsewhere in this module).
FROM_YOUTUBE_MARKER = u'From youtube'
class TooManyRecentCallsException(Exception):
    """
    Raised when the Youtube API responds with yt:quota too_many_recent_calls.

    Constructing the exception also logs the offending call's arguments and
    records an occurrence metric, so every rate-limit hit is observable.
    """
    def __init__(self, *args, **kwargs):
        super(TooManyRecentCallsException, self).__init__(*args, **kwargs)
        # Log the raw args/kwargs so the triggering request can be traced.
        logger.info('too_many_calls', extra={
            'exception_args': args,
            'exception_kwargs': kwargs})
        Occurrence('youtube.api_too_many_calls').mark()
from atom.http_core import Uri
import atom
def monkeypatch_class(name, bases, namespace):
    """Metaclass that patches attributes onto an existing class.

    When used as ``__metaclass__``, no new class is created: every
    attribute defined in the class body (*namespace*) is copied onto the
    single declared base class, which is then returned in place of a new
    type. *name* is the would-be class name required by the metaclass
    protocol and is otherwise unused.
    """
    assert len(bases) == 1, "Exactly one base class required"
    base = bases[0]
    # Use a distinct loop variable: the original shadowed the ``name``
    # parameter, which is confusing and error-prone.
    for attr_name, value in namespace.iteritems():
        if attr_name != "__metaclass__":
            setattr(base, attr_name, value)
    return base
class HttpClient(atom.http_core.HttpClient):
    """Monkeypatched replacement for atom's HttpClient.

    Because of the ``monkeypatch_class`` metaclass, everything defined here
    is copied onto ``atom.http_core.HttpClient`` itself instead of creating
    a subclass; the point is to log every outgoing Youtube API request.
    """
    __metaclass__ = monkeypatch_class

    # When truthy, httplib connection-level debugging is enabled per request.
    debug = None

    def Request(self, http_request):
        # Entry point used by the gdata/atom stack; delegate to our logger.
        return self._http_request(http_request.method, http_request.uri,
                                  http_request.headers, http_request._body_parts)

    def _get_connection(self, uri, headers=None):
        """Opens a socket connection to the server to set up an HTTP request.

        Args:
          uri: The full URL for the request as a Uri object.
          headers: A dict of string pairs containing the HTTP headers for the
              request.
        """
        connection = None
        if uri.scheme == 'https':
            if not uri.port:
                connection = httplib.HTTPSConnection(uri.host)
            else:
                connection = httplib.HTTPSConnection(uri.host, int(uri.port))
        else:
            if not uri.port:
                connection = httplib.HTTPConnection(uri.host)
            else:
                connection = httplib.HTTPConnection(uri.host, int(uri.port))
        return connection

    def _http_request(self, method, uri, headers=None, body_parts=None):
        """Makes an HTTP request using httplib.

        Args:
          method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
          uri: str or atom.http_core.Uri
          headers: dict of strings mapping to strings which will be sent as HTTP
              headers in the request.
          body_parts: list of strings, objects with a read method, or objects
              which can be converted to strings using str. Each of these
              will be sent in order as the body of the HTTP request.
        """
        # Log the full request before sending; this instrumentation is the
        # reason the class is monkeypatched in at all.
        # NOTE(review): if ``uri`` arrives as a string, the attribute reads
        # below happen before the Uri.parse_uri call further down and would
        # raise AttributeError — confirm callers always pass Uri objects.
        extra = {
            'youtube_headers': headers,
            'youtube_uri': {
                'host': uri.host,
                'port': uri.port,
                'scheme': uri.scheme,
                'path': uri.path,
                'query': uri.query
            },
            'youtube_method': method,
            'youtube_body_parts': body_parts
        }
        logger.info('youtube api request', extra=extra)
        if isinstance(uri, (str, unicode)):
            uri = Uri.parse_uri(uri)
        connection = self._get_connection(uri, headers=headers)
        if self.debug:
            connection.debuglevel = 1
        if connection.host != uri.host:
            # Host mismatch (presumably a proxy): request line needs the
            # absolute URI.
            connection.putrequest(method, str(uri))
        else:
            connection.putrequest(method, uri._get_relative_path())
        # Overcome a bug in Python 2.4 and 2.5:
        # httplib.HTTPConnection.putrequest adds the HTTP request header
        # 'Host: www.google.com:443' instead of 'Host: www.google.com',
        # resulting in the error message
        # 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
        if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
            hasattr(connection, '_buffer') and
            isinstance(connection._buffer, list)):
            header_line = 'Host: %s:443' % uri.host
            replacement_header_line = 'Host: %s' % uri.host
            try:
                connection._buffer[connection._buffer.index(header_line)] = (
                    replacement_header_line)
            except ValueError:  # header_line missing from connection._buffer
                pass
        # Send the HTTP headers.
        for header_name, value in headers.iteritems():
            connection.putheader(header_name, value)
        connection.endheaders()
        # If there is data, send it in the request.
        if body_parts and filter(lambda x: x != '', body_parts):
            for part in body_parts:
                _send_data_part(part, connection)
        # Return the HTTP Response from the server.
        return connection.getresponse()
def _send_data_part(data, connection):
    """Write one request body part to an open connection.

    Strings are sent as-is, file-like objects are streamed in chunks, and
    anything else is stringified with str() first.
    """
    if isinstance(data, (str, unicode)):
        # I might want to just allow str, not unicode.
        connection.send(data)
        return
    # Check to see if data is a file-like object that has a read method.
    elif hasattr(data, 'read'):
        # Read the file and send it a chunk at a time.
        while 1:
            binarydata = data.read(100000)
            if binarydata == '': break
            connection.send(binarydata)
        return
    else:
        # The data object was not a file.
        # Try to convert to a string and send the data.
        connection.send(str(data))
        return
def get_youtube_service():
    """
    Gets an instance of the youtube service with the proper developer key.

    Passing the developer key is needed; otherwise our quota is severely
    damaged.
    """
    yt_service = YouTubeService(developer_key=YOUTUBE_API_SECRET)
    # NOTE(review): ssl is deliberately disabled here — confirm whether that
    # is still required by the gdata client.
    yt_service.ssl = False
    return yt_service
# Module-level shared service instance, created once at import time.
yt_service = get_youtube_service()
@task
def save_subtitles_for_lang(lang, video_pk, youtube_id):
    """Celery task: import one subtitle language from Youtube.

    ``lang`` is a track dict (``lang_code``/``name``) as returned by
    ``YoutubeVideoType.get_subtitled_languages``; ``video_pk`` identifies
    the local Video and ``youtube_id`` the remote video. Returns silently
    when the language cannot be mapped or is unsupported, when the video
    is gone, or when Youtube returns no subtitle data.
    """
    # Imports are local to keep Django model loading out of module import.
    from django.utils.encoding import force_unicode
    from videos.models import Video
    from videos.tasks import video_changed_tasks
    from subtitles.pipeline import add_subtitles
    from subtitles.models import ORIGIN_IMPORTED
    yt_lc = lang.get('lang_code')
    # TODO: Make sure we can store all language data given to us by Youtube.
    # Right now, the bcp47 codec will refuse data it can't reliably parse.
    try:
        lc = LanguageCode(yt_lc, "bcp47").encode("unisubs")
    except KeyError:
        logger.warn("Youtube import did not find language code", extra={
            "data": {
                "language_code": yt_lc,
                "youtube_id": youtube_id,
            }
        })
        return
    if not lc in SUPPORTED_LANGUAGE_CODES:
        logger.warn("Youtube import did not find language code", extra={
            "data": {
                "language_code": lc,
                "youtube_id": youtube_id,
            }
        })
        return
    try:
        video = Video.objects.get(pk=video_pk)
    except Video.DoesNotExist:
        return
    url = u'http://www.youtube.com/api/timedtext?v=%s&lang=%s&name=%s&fmt=srt'
    url = url % (youtube_id, yt_lc, urlquote(lang.get('name', u'')))
    xml = YoutubeVideoType._get_response_from_youtube(url, return_string=True)
    if not bool(xml):
        return
    xml = force_unicode(xml, 'utf-8')
    subs = babelsubs.parsers.discover('srt').parse(xml).to_internal()
    version = add_subtitles(video, lc, subs, note="From youtube", complete=True, origin=ORIGIN_IMPORTED)
    # do not pass a version_id else, we'll trigger emails for those edits
    video_changed_tasks.delay(video.pk)
    Meter('youtube.lang_imported').inc()
    from apps.teams.models import BillingRecord
    # there is a caveat here, if running with CELERY_ALWAYS_EAGER,
    # this is called before there's a team video, and the billing records won't
    # be created. On the real world, it should be safe to assume that between
    # calling the youtube api and the db insertion, we'll get this called
    # when the video is already part of a team
    BillingRecord.objects.insert_record(version)
def should_add_credit(subtitle_version=None, video=None):
    """Return True if an Amara credit should be added.

    Credit is only added to videos that do not belong to a team. At least
    one of *subtitle_version* or *video* must be given; the video is
    derived from the subtitle version when not passed explicitly.

    Raises:
        ValueError: if neither argument is supplied.
    """
    # Only add credit to non-team videos
    if not video and not subtitle_version:
        # ValueError (instead of a bare Exception) so callers can catch the
        # misuse specifically; anything catching Exception still works.
        raise ValueError("You need to pass in at least one argument")
    if not video:
        video = subtitle_version.subtitle_language.video
    return not video.get_team_video()
def add_credit(subtitle_version, subs):
    """Append an Amara credit subtitle to *subs* when appropriate.

    The credit occupies up to the last three seconds of the video and is
    only added for non-team videos with synced subtitles that leave room
    at the end. Returns *subs*, possibly with one extra subtitle appended.
    """
    # If there are no subtitles, don't add any credits. This shouldn't really
    # happen since only completed subtitle versions can be synced to Youtube.
    # But a little precaution never hurt anyone.
    if len(subs) == 0:
        return subs
    if not should_add_credit(subtitle_version=subtitle_version):
        return subs
    from accountlinker.models import get_amara_credit_text
    language_code = subtitle_version.subtitle_language.language_code
    duration = subtitle_version.subtitle_language.video.duration
    last_sub = subs[-1]
    # An unsynced final subtitle means we can't place the credit safely.
    if last_sub.end_time is None:
        return subs
    time_left_at_the_end = (duration * 1000) - last_sub.end_time
    if time_left_at_the_end <= 0:
        return subs
    # Use at most the final three seconds for the credit.
    if time_left_at_the_end >= 3000:
        start = (duration - 3) * 1000
    else:
        start = (duration * 1000) - time_left_at_the_end
    subs.append_subtitle(
        start,
        duration * 1000,
        get_amara_credit_text(language_code),
        {}
    )
    # Removed leftover debug statement `print subs.subtitle_items()` — it
    # polluted stdout on every sync (and is a syntax error under Python 3).
    return subs
class YoutubeVideoType(VideoType):
    """VideoType implementation for Youtube.

    Handles URL recognition, metadata fetching through the gdata API and
    subtitle import/push for Youtube-hosted videos.
    """

    # Regexes that pull the video id out of the two public URL shapes.
    _url_patterns = [re.compile(x) for x in [
        r'youtube.com/.*?v[/=](?P<video_id>[\w-]+)',
        r'youtu.be/(?P<video_id>[\w-]+)',
    ]]

    HOSTNAMES = ("youtube.com", "youtu.be", "www.youtube.com",)

    abbreviation = 'Y'
    name = 'Youtube'
    site = 'youtube.com'

    # changing this will cause havock, let's talks about this first
    URL_TEMPLATE = 'http://www.youtube.com/watch?v=%s'

    CAN_IMPORT_SUBTITLES = True

    def __init__(self, url):
        self.url = url
        self.videoid = self._get_video_id(self.url)
        # Fetches the remote gdata entry eagerly; raises VideoTypeError on
        # API failure (see _get_entry).
        self.entry = self._get_entry(self.video_id)
        # we can't rely on author.name as that might not be unique
        # and it also won't match what the 3rd party account has
        username_url = self.entry.author[0].uri.text
        self.username = username_url[username_url.rfind("/") + 1:]

    @property
    def video_id(self):
        # Canonical accessor for the id extracted in __init__.
        return self.videoid

    def convert_to_video_url(self):
        """Return the canonical watch URL for this video."""
        return 'http://www.youtube.com/watch?v=%s' % self.video_id

    @classmethod
    def video_url(cls, obj):
        """
        This method can be called with either a VideoType object or
        an actual VideoURL object, therefore the if statement
        """
        if obj.videoid:
            return YoutubeVideoType.url_from_id(obj.videoid)
        else:
            return obj.url

    @classmethod
    def matches_video_url(cls, url):
        """Return truthy when *url* is a Youtube URL with an extractable id."""
        hostname = urlparse(url).netloc
        return hostname in YoutubeVideoType.HOSTNAMES and cls._get_video_id(url)

    def create_kwars(self):
        # NOTE(review): long-standing typo for "kwargs"; callers use this
        # name, so renaming it would break the interface.
        return {'videoid': self.video_id}

    def set_values(self, video_obj, fetch_subs_async=True):
        """Copy metadata from the fetched gdata entry onto *video_obj*,
        save it, and kick off subtitle import (optionally async)."""
        video_obj.title = self.entry.media.title.text or ''
        description = ''
        if self.entry.media.description:
            description = self.entry.media.description.text or ''
        video_obj.description = description
        if self.entry.media.duration:
            video_obj.duration = int(self.entry.media.duration.seconds)
        if self.entry.media.thumbnail:
            # max here will return the thumbnail with the biggest height
            thumbnail = max([(int(t.height), t) for t in self.entry.media.thumbnail])
            video_obj.thumbnail = thumbnail[1].url
        video_obj.small_thumbnail = 'http://i.ytimg.com/vi/%s/default.jpg' % self.video_id
        video_obj.save()
        Meter('youtube.video_imported').inc()
        try:
            self.get_subtitles(video_obj, async=fetch_subs_async)
        except :
            # Subtitle import failure must not abort the video import itself.
            logger.exception("Error getting subs from youtube:" )
        return video_obj

    def _get_entry(self, video_id):
        """Fetch the gdata entry for *video_id*, wrapping API errors."""
        Meter('youtube.api_request').inc()
        try:
            return yt_service.GetYouTubeVideoEntry(video_id=str(video_id))
        except RequestError, e:
            err = e[0].get('body', 'Undefined error')
            raise VideoTypeError('Youtube error: %s' % err)

    @classmethod
    def url_from_id(cls, video_id):
        # Build the canonical watch URL for a bare video id.
        return YoutubeVideoType.URL_TEMPLATE % video_id

    @classmethod
    def _get_video_id(cls, video_url):
        """Return the video id embedded in *video_url*, or False when none
        of the known URL patterns matches."""
        for pattern in cls._url_patterns:
            match = pattern.search(video_url)
            video_id = match and match.group('video_id')
            if bool(video_id):
                return video_id
        return False

    @classmethod
    def _get_response_from_youtube(cls, url, return_string=False):
        """GET *url* and return parsed XML (or the raw body when
        *return_string* is true).

        Implicitly returns None on HTTP errors and unparseable XML; both
        cases are logged with the full response for debugging.
        """
        h = httplib2.Http()
        resp, content = h.request(url, "GET")
        if resp.status < 200 or resp.status >= 400:
            logger.error("Youtube subtitles error", extra={
                'data': {
                    "url": url,
                    "status_code": resp.status,
                    "response": content
                }
            })
            return
        try:
            if return_string:
                return content
            return etree.fromstring(content)
        except etree.XMLSyntaxError:
            logger.error("Youtube subtitles error. Failed to parse response.", extra={
                'data': {
                    "url": url,
                    "response": content
                }
            })
            return

    def get_subtitled_languages(self):
        """List the subtitle tracks Youtube reports for this video as
        dicts with 'lang_code' and 'name' keys."""
        url = "http://www.youtube.com/api/timedtext?type=list&v=%s" % self.video_id
        xml = self._get_response_from_youtube(url)
        if xml is None:
            return []
        output = []
        for lang in xml.xpath('track'):
            item = dict(
                lang_code=lang.get('lang_code'),
                name=lang.get('name', u'')
            )
            output.append(item)
        return output

    def get_subtitles(self, video_obj, async=True):
        """Import every available subtitle language, via celery when
        *async* is true, inline otherwise."""
        langs = self.get_subtitled_languages()
        if async:
            func = save_subtitles_for_lang.delay
        else:
            func = save_subtitles_for_lang.run
        for item in langs:
            func(item, video_obj.pk, self.video_id)

    def _get_bridge(self, third_party_account):
        """Build a YouTubeApiBridge authorized as *third_party_account*."""
        # Because somehow Django's ORM is case insensitive on CharFields.
        is_always = (third_party_account.full_name.lower() ==
                     YOUTUBE_ALWAYS_PUSH_USERNAME.lower() or
                     third_party_account.username.lower() ==
                     YOUTUBE_ALWAYS_PUSH_USERNAME.lower())
        return YouTubeApiBridge(third_party_account.oauth_access_token,
                                third_party_account.oauth_refresh_token, self.videoid, is_always)

    def update_subtitles(self, subtitle_version, third_party_account):
        """
        Updated subtitles on Youtube. This method should not be called
        directly. See accountlinker.models.ThirdPartyAccounts.mirror_on_third_party
        That call will check if the video can be updated(must be synched,
        must be public, etc).
        """
        bridge = self._get_bridge(third_party_account)
        bridge.upload_captions(subtitle_version)

    def delete_subtitles(self, language, third_party_account):
        # Remove this language's caption track from the remote video.
        bridge = self._get_bridge(third_party_account)
        bridge.delete_subtitles(language)
def _prepare_subtitle_data_for_version(subtitle_version):
    """
    Given a subtitles.models.SubtitleVersion, return a tuple of srt content,
    title and language code.

    The language code is re-encoded from the internal scheme to bcp47; a
    KeyError is logged and re-raised when the mapping does not exist.
    """
    language_code = subtitle_version.subtitle_language.language_code
    try:
        lc = LanguageCode(language_code.lower(), "unisubs")
        language_code = lc.encode("bcp47")
    except KeyError:
        error = "Couldn't encode LC %s to youtube" % language_code
        logger.error(error)
        raise KeyError(error)
    subs = subtitle_version.get_subtitles()
    # Non-team videos get the Amara credit appended before serialization.
    subs = add_credit(subtitle_version, subs)
    content = babelsubs.generators.discover('srt').generate(subs)
    content = unicode(content).encode('utf-8')
    # Title is intentionally left blank.
    return content, "", language_code
class YouTubeApiBridge(gdata.youtube.client.YouTubeClient):
    """Wrapper around the gdata Youtube client for one video.

    Handles OAuth2 token plumbing, caption upload/delete, and pushing an
    Amara credit into the video description.
    """

    upload_uri_base = 'http://gdata.youtube.com/feeds/api/users/default/uploads/%s'

    def __init__(self, access_token, refresh_token, youtube_video_id,
                 is_always_push_account=False):
        """
        A wrapper around the gdata client, to make life easier.
        In order to edit captions for a video, the oauth credentials
        must be that video's owner on youtube.
        """
        super(YouTubeApiBridge, self).__init__()
        self.access_token = access_token
        self.refresh_token = refresh_token
        self.token = gdata.gauth.OAuth2Token(
            client_id=settings.YOUTUBE_CLIENT_ID,
            client_secret=settings.YOUTUBE_CLIENT_SECRET,
            scope='https://gdata.youtube.com',
            user_agent='universal-subtitles',
            access_token=access_token,
            refresh_token=refresh_token
        )
        self.token.authorize(self)
        self.youtube_video_id = youtube_video_id
        # "Always push" accounts skip the Amara credit (see upload_captions).
        self.is_always_push_account = is_always_push_account

    def request(self, *args, **kwargs):
        """
        Override the very low-level request method to catch possible
        too_many_recent_calls errors.
        """
        Meter('youtube.api_request').inc()
        try:
            return super(YouTubeApiBridge, self).request(*args, **kwargs)
        except gdata.client.RequestError, e:
            if 'too_many_recent_calls' in str(e):
                raise TooManyRecentCallsException(e.headers, e.reason,
                                                  e.status, e.body)
            else:
                raise e

    def refresh(self):
        """
        Refresh the access token
        """
        url = 'https://accounts.google.com/o/oauth2/token'
        data = {
            'client_id': settings.YOUTUBE_CLIENT_ID,
            'client_secret': settings.YOUTUBE_CLIENT_SECRET,
            'refresh_token': self.refresh_token,
            'grant_type': 'refresh_token'
        }
        r = requests.post(url, data=data)
        # NOTE(review): this relies on the old requests API where ``.json``
        # was a property; with modern requests it would need ``r.json()``.
        self.access_token = r.json and r.json.get('access_token')

    def _get_captions_info(self):
        """
        Retrieves a dictionary with the current caption data for this youtube video.
        Format is:
            {
                "lang_code": {
                    "url": [url for track]
                    "track": [track entry object, useful for other operations]
                }
            }
        The result is also cached on ``self.captions``.
        """
        self.captions = {}
        entry = self.GetVideoEntry(video_id=self.youtube_video_id)
        caption_track = entry.get_link(rel='http://gdata.youtube.com/schemas/2007#video.captionTracks')
        if not caption_track:
            # No tracks were returned. This video doesn't have any existing
            # captions.
            return self.captions
        captions_feed = self.get_feed(caption_track.href, desired_class=gdata.youtube.data.CaptionFeed)
        captions = captions_feed.entry
        for entry in captions:
            lang = entry.get_elements(tag="content")[0].lang
            url = entry.get_edit_media_link().href
            self.captions[lang] = {
                "url": url,
                "track": entry
            }
        return self.captions

    def get_user_profile(self, username=None):
        """Fetch the Youtube profile feed for *username*."""
        if not username:
            raise YouTubeError("You need to pass a username")
        uri = '%s%s' % (gdata.youtube.client.YOUTUBE_USER_FEED_URI, username)
        return self.get_feed(uri, desired_class=gdata.youtube.data.UserProfileEntry)

    def upload_captions(self, subtitle_version):
        """
        Will upload the subtitle version to this youtube video id.
        If the subtitle already exists, will delete it and recreate it.
        This subs should be synced! Else we upload might fail.
        """
        lang = subtitle_version.subtitle_language.language_code
        try:
            lc = LanguageCode(lang.lower(), "unisubs")
            lang = lc.encode("youtube")
        except KeyError:
            logger.error("Couldn't encode LC %s to youtube" % lang)
            return
        subs = subtitle_version.get_subtitles()
        if not self.is_always_push_account:
            # Regular accounts get the Amara credit in subs and description.
            subs = add_credit(subtitle_version, subs)
            self.add_credit_to_description(subtitle_version.subtitle_language.video)
        content = babelsubs.generators.discover('srt').generate(subs).encode('utf-8')
        title = ""
        if hasattr(self, "captions") is False:
            self._get_captions_info()
        # We can't just update a subtitle track in place. We need to delete
        # the old one and upload a new one.
        if lang in self.captions:
            self._delete_track(self.captions[lang]['track'])
        res = self.create_track(self.youtube_video_id, title, lang,
                                content, settings.YOUTUBE_CLIENT_ID,
                                settings.YOUTUBE_API_SECRET, self.token, {'fmt': 'srt'})
        Meter('youtube.subs_pushed').inc()
        return res

    def add_credit_to_description(self, video):
        """
        Get the entry information from Youtube, extract the original
        description, prepend the description with Amara credits and push it
        back to Youtube.
        If our update call doesn't succeed on the first try, we refresh the
        access token and try again.
        If the existing description starts with the credit text, we just
        return.
        """
        from accountlinker.models import add_amara_description_credit, check_authorization
        from apps.videos.templatetags.videos_tags import shortlink_for_video
        if not should_add_credit(video=video):
            return False
        is_authorized, _ = check_authorization(video)
        if not is_authorized:
            return False
        uri = self.upload_uri_base % self.youtube_video_id
        entry = self.GetVideoEntry(uri=uri)
        # Round-trip through XML to obtain a mutable entry object.
        entry = entry.to_string()
        entry = gdata.youtube.YouTubeVideoEntryFromString(entry)
        old_description = entry.media.description.text or ''
        if old_description:
            old_description = old_description.decode("utf-8")
        video_url = shortlink_for_video(video)
        language_code = video.language
        if not language_code:
            language_code = 'en'
        new_description = add_amara_description_credit(old_description,
                                                      video_url, language_code)
        if new_description == old_description:
            # Credit already present; nothing to push.
            return True
        entry.media.description.text = new_description
        entry = entry.ToString()
        status_code = self._make_update_request(uri, entry)
        if status_code == 401:
            # Access token expired: refresh once and retry.
            self.refresh()
            status_code = self._make_update_request(uri, entry)
        if status_code == 200:
            Meter('youtube.description_changed').inc()
            return True
        return False

    def _do_update_request(self, uri, data, headers):
        # Thin wrapper around requests.put (kept separate, presumably so it
        # can be stubbed out).
        return requests.put(uri, data=data, headers=headers)

    def _make_update_request(self, uri, entry):
        """PUT *entry* to *uri*, retrying failed (400/403) responses every
        10 seconds for up to ~10 minutes; returns the final HTTP status."""
        Meter('youtube.api_request').inc()
        headers = {
            'Content-Type': 'application/atom+xml',
            'Authorization': 'Bearer %s' % self.access_token,
            'GData-Version': '2',
            'X-GData-Key': 'key=%s' % YOUTUBE_API_SECRET
        }
        status_code = 0
        retry_count = 0
        while True:
            r = self._do_update_request(uri, data=entry, headers=headers)
            # if not 400 or 403, assume success (i.e. 200, 201, etc.)
            if r.status_code != 400 and r.status_code != 403:
                break
            if r.status_code == 403 and 'too_many_recent_calls' in r.content:
                #raise TooManyRecentCallsException(r.headers, r.raw)
                extra = r.headers
                extra['raw'] = r.raw
                logger.error('Youtube too many recent calls', extra=extra)
            if r.status_code == 400:
                extra = {'raw': r.raw, 'content': r.content}
                logger.error('Youtube API request failed', extra=extra)
            retry_count += 1
            if retry_count > 60:  # retry for a max of ~ 10 min
                logger.error('Retries exhausted for Youtube API request',
                             extra={'content': r.content, 'status': r.status_code,
                                    'headers': r.headers, 'uri': uri})
                break
            time.sleep(10)
        status_code = r.status_code
        return status_code

    def _delete_track(self, track):
        # Low-level caption track deletion via the gdata client.
        res = self.delete_track(self.youtube_video_id, track,
                                settings.YOUTUBE_CLIENT_ID, settings.YOUTUBE_API_SECRET,
                                self.token)
        return res

    def delete_subtitles(self, language):
        """
        Deletes the subtitles for this language on this YouTube video.
        Smart enough to determine if this video already has such subs.
        """
        try:
            lc = LanguageCode(language, "unisubs")
            lang = lc.encode("youtube")
        except KeyError:
            logger.error("Couldn't encode LC %s to youtube" % language)
            return
        if hasattr(self, "captions") is False:
            self._get_captions_info()
        if lang in self.captions:
            self._delete_track(self.captions[lang]['track'])
        else:
            logger.error("Couldn't find LC %s in youtube" % lang)
| agpl-3.0 |
lavizhao/insummer | code/test/test_query_expansion.py | 1 | 2511 | #!/usr/bin/python3
import json
import sys
sys.path.append("..")
import insummer
#这个是只用了title的Question类,非常傻逼易用好用
from insummer.common_type import NaiveQuestion
from insummer.read_conf import config
from insummer.knowledge_base import init_assoc_space
from insummer.query_expansion.entity_finder import NgramEntityFinder
from conceptnet5 import assoc_query
from conceptnet5 import query
from conceptnet5.query import AssertionFinder as Finder
# Read the data: the file is a stream of JSON objects, one per line.
def read_data(fname):
    """Read questions from *fname*, one JSON object per line.

    Each line must carry a "title" and a comma-separated "entity" field;
    blank lines are skipped. Returns a list of NaiveQuestion objects.
    """
    questions = []
    # `with` guarantees the file is closed (the original never closed it).
    with open(fname) as f:
        for line in f:
            # The original tested `len(line) <= 0`, which never fires because
            # lines keep their trailing newline; strip before testing so
            # blank lines don't crash json.loads.
            if not line.strip():
                continue
            line_json = json.loads(line)
            title = line_json["title"]
            entity = line_json["entity"].split(",")
            # Build a naive version of the question.
            questions.append(NaiveQuestion(title, entity))
    return questions
if __name__ == '__main__':
    # Demo driver: run entity extraction plus query expansion over a sample
    # question title and print the expanded/associated terms.
    print("这个是测试语义扩展的")
    print("需要做的第一步是读取数据,建立一个比较虚假的quesion_list类")
    # Configuration registry (disabled in this demo):
    #conf = config("../../conf/question.conf")
    # Read the data (disabled in this demo):
    #questions = read_data(conf["title_qe_pos"])
    # Load the spreading-activation space (disabled in this demo):
    #finder = Finder()
    #dir1 = '/home/lavi/.conceptnet5/assoc/assoc-space-5.3'
    #sa = assoc_query.AssocSpaceWrapper(dir1,finder)
    title = "How do Motorcycles pollute? Are Motorcycles the worst polluter.if not what is?What are the ways Motorcycles pollute. "
    nq = NaiveQuestion(title, entity=None)
    # Find the entities in the title.
    ef = NgramEntityFinder(title)
    entity = ef.find()
    print(entity)
    nq.set_entity(entity)
    questions = [nq]
    sa = init_assoc_space()
    # NOTE(review): `g` is defined but never used below.
    g = lambda x: x.startswith('/c/en')
    for question in questions:
        question.print()
        terms = question.get_entity()
        # Prefix each entity with the English ConceptNet namespace.
        terms = [('/c/en/' + i, 1.0) for i in terms]
        result = sa.expand_terms(terms, 40)
        for term, weight in result:
            if term.startswith('/c/en') and len(term) < 40:
                print("%40s%40s" % (term[6:], weight))
                #print("%40s%40s"%(term,weight))
        print(100 * "=")
        result1 = sa.associations(terms, limit=40)
        for term, weight in result1:
            if term.startswith('/c/en') and len(term) < 40:
                print("%40s%40s" % (term[6:], weight))
                #print("%40s%40s"%(term,weight))
        print(40 * "=")
| mit |
acosinwork/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/response.py | 227 | 7410 | # urllib3/response.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import gzip
import logging
import zlib
from io import BytesIO
from .exceptions import DecodeError
from .packages.six import string_types as basestring
log = logging.getLogger(__name__)
def decode_gzip(data):
    """Return *data* decompressed from the gzip format."""
    buf = BytesIO(data)
    return gzip.GzipFile(fileobj=buf).read()
def decode_deflate(data):
    """Return *data* decompressed from deflate encoding.

    Tries the standard zlib-wrapped format first, then falls back to a raw
    DEFLATE stream (negative window bits), which some servers send.
    """
    raw_wbits = -zlib.MAX_WBITS
    for wbits in (zlib.MAX_WBITS, raw_wbits):
        try:
            return zlib.decompress(data, wbits)
        except zlib.error:
            if wbits == raw_wbits:
                raise
class HTTPResponse(object):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """

    # Maps a Content-Encoding header value to its decoder function.
    CONTENT_DECODERS = {
        'gzip': decode_gzip,
        'deflate': decode_deflate,
    }

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = headers or {}
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self._decode_content = decode_content
        # A string body is stored directly; a file-like body is kept in
        # self._fp and read lazily.
        self._body = body if body and isinstance(body, basestring) else None
        self._fp = None
        self._original_response = original_response
        self._pool = pool
        self._connection = connection
        if hasattr(body, 'read'):
            self._fp = body
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in [301, 302, 303, 307]:
            return self.headers.get('location')
        return False

    def release_conn(self):
        # Hand the underlying connection back to its pool, if we own one.
        if not self._pool or not self._connection:
            return
        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwords-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body
        if self._fp:
            return self.read(cache_content=True)

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, decoding and caching
            is skipped because we can't decode partial content nor does it make
            sense to cache partial content as the full response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header. (Overridden if ``amt`` is set.)

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 2616
        # Section 3.5
        content_encoding = self.headers.get('content-encoding', '').lower()
        decoder = self.CONTENT_DECODERS.get(content_encoding)
        if decode_content is None:
            decode_content = self._decode_content
        if self._fp is None:
            return
        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
            else:
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
                    # properly close the connection in all cases. There is no harm
                    # in redundantly calling close.
                    self._fp.close()
                return data
            try:
                if decode_content and decoder:
                    data = decoder(data)
            except (IOError, zlib.error):
                raise DecodeError("Received response with content-encoding: %s, but "
                                  "failed to decode it." % content_encoding)
            if cache_content:
                self._body = data
            return data
        finally:
            # Whether reading succeeded or not, return the connection to the
            # pool once the original response is fully consumed.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        # Normalize headers between different versions of Python
        headers = {}
        for k, v in r.getheaders():
            # Python 3: Header keys are returned capitalised
            k = k.lower()
            has_value = headers.get(k)
            if has_value:  # Python 3: Repeating header keys are unmerged.
                v = ', '.join([has_value, v])
            headers[k] = v
        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)
| lgpl-2.1 |
c4fcm/MediaCloud-API-Client | mediacloud/test/api_topic_test.py | 1 | 14910 | from mediacloud.test.basetest import AdminApiBaseTest, ApiBaseTest
# Topic ids on the live Media Cloud server, used as fixtures by these tests.
TEST_TOPIC_ID = 1537  # climate change topic
TEST_TOPIC2_ID = 1019  # common core
TEST_TOPIC3_ID = 3180  # rahul test
class ApiTopicTest(AdminApiBaseTest):
    """Integration tests for the topic read API.

    These hit the live Media Cloud server through the admin client set up
    by AdminApiBaseTest, so they need network access and a valid API key.
    """
    def testTopic(self):
        # Fetch one known topic and sanity-check its identity fields.
        topic = self._mc.topic(TEST_TOPIC_ID)
        self.assertEqual(int(topic['topics_id']), 1537)
        self.assertEqual(topic['name'], 'Climate Change 2016')

    def testTopicHasMaxStories(self):
        # max_stories should be present and parseable as an integer.
        topic = self._mc.topic(TEST_TOPIC_ID)
        self.assertIn('max_stories', topic)
        try:
            int(topic['max_stories'])
            self.assertTrue(True)
        except ValueError:
            self.assertTrue(False, "max_stories value of '{}' is not an int ".format(topic['max_stories']))

    def testTopicList(self):
        # verify it pulls some
        topic_list = self._mc.topicList()
        self.assertGreater(len(topic_list['topics']), 1)
        # verify limit param
        topic_list = self._mc.topicList(limit=2)
        self.assertEqual(len(topic_list['topics']), 2)
        # verify limit param
        topic_list = self._mc.topicList(limit=1)
        self.assertEqual(len(topic_list['topics']), 1)

    def testTopicListPublic(self):
        topic_list = self._mc.topicList(public=True)
        # Fixed: the original measured len() of the response dict (its key
        # count), not the number of topics returned.
        self.assertGreater(len(topic_list['topics']), 1)
        for topic in topic_list['topics']:
            self.assertEqual(topic['is_public'], 1)

    def testTopicListName(self):
        to_match = "common"
        topic_list = self._mc.topicList(name=to_match)
        # Fixed: same dict-length bug as testTopicListPublic.
        self.assertGreater(len(topic_list['topics']), 1)
        for topic in topic_list['topics']:
            self.assertIn(to_match.lower(), topic['name'].lower())

    def testTopicListPaging(self):
        # verify second page doesn't contain any ids from the first page
        topic_list_page_1 = self._mc.topicList()
        page_1_ids = [t['topics_id'] for t in topic_list_page_1['topics']]
        self.assertGreater(len(topic_list_page_1['topics']), 1)
        topic_list_page_2 = self._mc.topicList(topic_list_page_1['link_ids']['next'])
        self.assertGreater(len(topic_list_page_2['topics']), 1)
        page_2_ids = [t['topics_id'] for t in topic_list_page_2['topics']]
        # Fixed: the original called assertTrue(len(...), 0), which treats 0
        # as the failure *message* and passes even when pages overlap.
        self.assertEqual(len(set(page_1_ids).intersection(set(page_2_ids))), 0)
class ApiTopicSnapshotTest(AdminApiBaseTest):
    """Exercises the topic snapshot listing endpoint."""
    def testTopicSnapshotList(self):
        """The climate-change test topic should expose exactly one snapshot."""
        snapshot_list = self._mc.topicSnapshotList(TEST_TOPIC_ID)
        self.assertEqual(1, len(snapshot_list))
class ApiTopicSpiderTest(AdminApiBaseTest):
    """Integration test for the topic spidering status endpoint.

    Requires network access and an admin API key (see AdminApiBaseTest).
    """
    def testTopicSpiderStatus(self):
        # TEST_TOPIC2_ID is the "common core" topic defined at module scope
        results = self._mc.topicSpiderStatus(TEST_TOPIC2_ID)
        self.assertIn('job_states', results)
    # The block below is a test disabled by wrapping it in a string literal;
    # left byte-for-byte as found. NOTE(review): its last line
    # `assertTrue(0, ...)` looks like it was meant to be assertEqual --
    # confirm before re-enabling.
    '''
    def testTopicSpiderIterationsList(self):
        results = self._mc.topicSpiderIterationsList(TEST_TOPIC2_ID)
        self.assertTrue('iterations' in results)
        self.assertEqual(15, len(results['iterations']))
        first_iteration = results['iterations'][0]
        self.assertTrue('iteration' in first_iteration)
        self.assertEqual(0, first_iteration['iteration'])
        self.assertTrue('count' in first_iteration)
        self.assertTrue(0, first_iteration['count'])
    '''
class ApiTopicTimespanTest(AdminApiBaseTest):
    """Exercises topic timespan listing."""
    def testTopicTimespanList(self):
        """Topic 1 should expose more than one timespan."""
        timespan_list = self._mc.topicTimespanList(1)
        self.assertTrue(len(timespan_list) > 1)
class AdminTopicStoryListTest(AdminApiBaseTest):
    """Integration tests for topic story listing (admin API).

    All tests hit the live server through self._mc and assume topic 1
    exists with plenty of stories.
    """
    TOPIC_ID = 1
    def testTopicStoryListFacebookData(self):
        # the facebook-data endpoint's default page size is 1000
        response = self._mc.topicStoryListFacebookData(self.TOPIC_ID)
        self.assertEqual(len(response['counts']), 1000)
        for story in response['counts']:
            self.assertIn('facebook_api_collect_date', story)
            self.assertIn('facebook_comment_count', story)
            self.assertIn('facebook_share_count', story)
            self.assertIn('stories_id', story)
    def testTopicStoryList(self):
        # default page size for story lists is 20
        response = self._mc.topicStoryList(self.TOPIC_ID)
        self.assertEqual(len(response['stories']), 20)
        for story in response['stories']:
            self.assertIn('date_is_reliable', story)
    def testTopicStoryListPaging(self):
        """Successive pages (via link_ids['next']) share no stories_ids."""
        limit = 50
        response_page_1 = self._mc.topicStoryList(self.TOPIC_ID, limit=limit)
        response_page_1_ids = [m['stories_id'] for m in response_page_1['stories']]
        self.assertEqual(len(response_page_1['stories']), 50)
        self.assertIn('link_ids', response_page_1)
        response_page_2 = self._mc.topicStoryList(self.TOPIC_ID, link_id=response_page_1['link_ids']['next'],
                                                  limit=limit)
        response_page_2_ids = [m['stories_id'] for m in response_page_2['stories']]
        # verify no duplicated media_ids across pages
        combined_ids = set(response_page_1_ids+response_page_2_ids)
        self.assertEqual(len(response_page_1_ids)+len(response_page_2_ids), len(combined_ids))
    def testTopicStoryListLimit(self):
        """The limit parameter overrides the default page size of 20."""
        response1 = self._mc.topicStoryList(self.TOPIC_ID)
        self.assertEqual(len(response1['stories']), 20)
        response2 = self._mc.topicStoryList(self.TOPIC_ID, limit=67)
        self.assertEqual(len(response2['stories']), 67)
    def testTopicStoryListSortInlink(self):
        """sort='inlink' yields non-increasing inlink_count values."""
        response = self._mc.topicStoryList(self.TOPIC_ID, limit=500, sort='inlink')
        # sentinel larger than any plausible real count
        last_inlink_count = 1000000000000
        for story in response['stories']:
            self.assertLessEqual(story['inlink_count'], last_inlink_count)
            last_inlink_count = story['inlink_count']
    def testTopicStoryListSortFacebook(self):
        """sort='facebook' yields non-increasing facebook_share_count values."""
        response = self._mc.topicStoryList(self.TOPIC_ID, limit=500, sort='facebook')
        last_inlink_count = 1000000000000
        for story in response['stories']:
            self.assertLessEqual(story['facebook_share_count'], last_inlink_count)
            last_inlink_count = story['facebook_share_count']
    def testTopicStoryListSortTwitter(self):
        """sort='twitter' yields non-increasing normalized_tweet_count values.

        Stories lacking a (non-None) normalized_tweet_count are skipped.
        """
        response = self._mc.topicStoryList(self.TOPIC_ID, limit=500, sort='twitter')
        last_inlink_count = 1000000000000
        for story in response['stories']:
            # NOTE(review): last_inlink_count can never be None here (it is
            # only reassigned under this same guard), so that part of the
            # condition looks defensive/dead -- confirm before simplifying.
            if (last_inlink_count is not None) and ('normalized_tweet_count' in story) and (story['normalized_tweet_count'] is not None):
                self.assertLessEqual(story['normalized_tweet_count'], last_inlink_count)
                last_inlink_count = story['normalized_tweet_count']
class TopicStoryLinksTest(AdminApiBaseTest):
    """Exercises story-to-story link retrieval within a topic."""
    def testStoryLinks(self):
        link_page = self._mc.topicStoryLinks(TEST_TOPIC_ID)
        self.assertTrue(len(link_page['links']) > 0)
    def testStoryLinksLimit(self):
        link_page = self._mc.topicStoryLinks(TEST_TOPIC_ID, limit=100)
        self.assertEqual(100, len(link_page['links']))
    def testStoryLinksPaging(self):
        first_page = self._mc.topicStoryLinks(TEST_TOPIC_ID)
        self.assertTrue(len(first_page['links']) > 0)
        # follow the server-provided continuation token to the next page
        second_page = self._mc.topicStoryLinks(TEST_TOPIC_ID, link_id=first_page['link_ids']['next'])
        self.assertTrue(len(second_page['links']) > 0)
class AdminTopicStoryCountTest(AdminApiBaseTest):
    """Exercises topic story counting, with and without a query filter."""
    TOPIC_ID = 1
    def testTopicStoryCount(self):
        unfiltered = self._mc.topicStoryCount(self.TOPIC_ID)
        self.assertIn('count', unfiltered)
        self.assertGreater(unfiltered['count'], 0)
        filtered = self._mc.topicStoryCount(self.TOPIC_ID, q='Obama')
        self.assertIn('count', filtered)
        self.assertGreater(filtered['count'], 0)
        # a query can only narrow the result set
        self.assertGreater(unfiltered['count'], filtered['count'])
class AdminTopicMediaListTest(AdminApiBaseTest):
    """Integration tests for topic media listing: fields, paging, sorting.

    Hits the live API through self._mc; assumes topic 1 exists with at
    least ~50 media sources.
    """
    TOPIC_ID = 1
    def testTopicMediaList(self):
        """Every media item should at least carry a media_id."""
        response = self._mc.topicMediaList(self.TOPIC_ID)
        self.assertIn('link_ids', response)
        self.assertIn('media', response)
        for media in response['media']:
            self.assertIn('media_id', media)
    def testTopicMediaListMetadata(self):
        """Each media item exposes the standard metadata fields."""
        response = self._mc.topicMediaList(self.TOPIC_ID)
        for media in response['media']:
            self.assertIn("pub_country", media['metadata'])
            self.assertIn("pub_state", media['metadata'])
            self.assertIn("language", media['metadata'])
            self.assertIn("about_country", media['metadata'])
            self.assertIn("media_type", media['metadata'])
    def testTopicMediaListLimit(self):
        """Default page size is 20; the limit parameter overrides it."""
        response = self._mc.topicMediaList(self.TOPIC_ID)
        self.assertEqual(len(response['media']), 20)
        response = self._mc.topicMediaList(self.TOPIC_ID, limit=31)
        self.assertEqual(len(response['media']), 31)
    def testTopicMediaListPaging(self):
        """Successive pages (via link_ids['next']) share no media_ids."""
        limit = 10
        response_page_1 = self._mc.topicMediaList(self.TOPIC_ID, limit=limit)
        response_page_1_ids = [m['media_id'] for m in response_page_1['media']]
        self.assertEqual(len(response_page_1['media']), 10)
        self.assertIn('link_ids', response_page_1)
        response_page_2 = self._mc.topicMediaList(self.TOPIC_ID, link_id=response_page_1['link_ids']['next'],
                                                  limit=limit)
        response_page_2_ids = [m['media_id'] for m in response_page_2['media']]
        # verify no duplicated media_ids across pages
        combined_ids = set(response_page_1_ids+response_page_2_ids)
        self.assertEqual(len(response_page_1_ids)+len(response_page_2_ids), len(combined_ids))
    def testTopicMediaListSortInlink(self):
        """sort='inlink' yields non-increasing inlink_count values."""
        response = self._mc.topicMediaList(self.TOPIC_ID, sort='inlink')
        # sentinel larger than any plausible real count
        last_count = 1000000000000
        for media in response['media']:
            self.assertLessEqual(media['inlink_count'], last_count)
            last_count = media['inlink_count']
    def testTopicMediaListSortFacebook(self):
        """sort='facebook' yields non-increasing facebook_share_count values."""
        response = self._mc.topicMediaList(self.TOPIC_ID, sort='facebook')
        last_count = 1000000000000
        for media in response['media']:
            # skip the comparison when the API reports no share count
            if (last_count is not None) and (media['facebook_share_count'] is not None):
                self.assertLessEqual(media['facebook_share_count'], last_count)
                last_count = media['facebook_share_count']
    def testTopicMediaListSortTwitter(self):
        """sort='twitter' yields non-increasing simple_tweet_count values."""
        response = self._mc.topicMediaList(self.TOPIC_ID, sort='twitter')
        last_count = 1000000000000
        for media in response['media']:
            if (last_count is not None) and ('simple_tweet_count' in media) and (media['simple_tweet_count'] is not None):
                self.assertLessEqual(media['simple_tweet_count'], last_count)
                last_count = media['simple_tweet_count']
class TopicMediaLinksText(AdminApiBaseTest):
    """Exercises media-to-media link retrieval within a topic.

    (The class name keeps its historical spelling; unittest discovers it
    by TestCase subclassing, not by name.)
    """
    def testMediaLinks(self):
        link_page = self._mc.topicMediaLinks(TEST_TOPIC_ID)
        self.assertTrue(len(link_page['links']) > 0)
    def testMediaLinksLimit(self):
        link_page = self._mc.topicMediaLinks(TEST_TOPIC_ID, limit=100)
        self.assertEqual(100, len(link_page['links']))
    def testMediaLinksPaging(self):
        first_page = self._mc.topicMediaLinks(TEST_TOPIC_ID)
        self.assertTrue(len(first_page['links']) > 0)
        second_page = self._mc.topicMediaLinks(TEST_TOPIC_ID, link_id=first_page['link_ids']['next'])
        self.assertTrue(len(second_page['links']) > 0)
class AdminTopicWordCountTest(AdminApiBaseTest):
    """Exercises topic word counts: result size, ordering and num_words."""
    TOPIC_ID = 1
    def testResults(self):
        term_freq = self._mc.topicWordCount(self.TOPIC_ID)
        self.assertEqual(500, len(term_freq))
        # based on the random sample the second-ranked term can vary
        self.assertIn(term_freq[1]['term'], [u'zimmerman', u'trayvon', u'martin'])
    def testSort(self):
        term_freq = self._mc.topicWordCount(self.TOPIC_ID)
        # counts must come back in non-increasing order
        previous_count = 10000000000
        for entry in term_freq:
            self.assertTrue(previous_count >= entry['count'])
            previous_count = entry['count']
    def testNumWords(self):
        default_words = self._mc.topicWordCount(self.TOPIC_ID)
        self.assertEqual(500, len(default_words))
        some_words = self._mc.topicWordCount(self.TOPIC_ID, num_words=52)
        self.assertEqual(52, len(some_words))
        many_words = self._mc.topicWordCount(self.TOPIC_ID, num_words=1000)
        self.assertTrue(len(many_words) > 500)
class AdminTopicMediaMapTest(AdminApiBaseTest):
    """Exercises topic media map retrieval (GEXF export and map listing)."""
    def testMediaMap(self):
        gexf_payload = self._mc.topicMediaMap(TEST_TOPIC2_ID)
        self.assertIn('gexf', str(gexf_payload))
    def testMediaMapList(self):
        map_listing = self._mc.topicMediaMapList(TEST_TOPIC2_ID)
        self.assertIn('timespan_maps', map_listing)
        self.assertEqual(0, len(map_listing['timespan_maps']))
class TopicSeedQueryTest(AdminApiBaseTest):
    """Exercises adding and then removing a topic seed query."""
    def testSeedQuery(self):
        added = self._mc.topicAddSeedQuery(TEST_TOPIC3_ID, 'reddit', 'pushshift', 'rahul')
        self.assertIn('topic_seed_query', added)
        self.assertIn('topic_seed_queries_id', added['topic_seed_query'])
        # clean up by deleting the query we just added
        removed = self._mc.topicRemoveSeedQuery(TEST_TOPIC3_ID, added['topic_seed_query']['topic_seed_queries_id'])
        self.assertEqual(1, removed['success'])
class TopicInfoTest(AdminApiBaseTest):
    """Exercises the topic info endpoint."""
    def testTopicInfo(self):
        info_response = self._mc.topicInfo()
        self.assertIn('info', info_response)
        # the info payload must describe platforms, their sources map,
        # and the available sources
        for expected_key in ('topic_platforms', 'topic_platforms_sources_map', 'topic_sources'):
            self.assertIn(expected_key, info_response['info'])
class TopicTimespansListTest(ApiBaseTest):
    """Exercises timespan listing and per-timespan file listing."""
    def testTimespansList(self):
        snapshots = self._mc.topicSnapshotList(TEST_TOPIC3_ID)
        timespans = self._mc.topicTimespanList(TEST_TOPIC3_ID, snapshots_id=snapshots[0]['snapshots_id'])
        assert len(timespans) > 0
        results = self._mc.topicTimespanFiles(TEST_TOPIC3_ID, timespans_id=timespans[0]['timespans_id'])
        assert 'timespan_files' in results
        file_list = results['timespan_files']
        assert len(file_list) > 0
        first_file = file_list[0]
        assert 'timespans_id' in first_file
        assert first_file['timespans_id'] == timespans[0]['timespans_id']
        assert 'url' in first_file
        assert first_file['url'].startswith('http')
        assert 'name' in first_file
class TopicSnapshotsListTest(AdminApiBaseTest):
    """Exercises per-snapshot file listing for a topic."""
    def testSnapshotsList(self):
        snapshots = self._mc.topicSnapshotList(TEST_TOPIC3_ID)
        results = self._mc.topicSnapshotFiles(TEST_TOPIC3_ID, snapshots_id=snapshots[0]['snapshots_id'])
        assert 'snapshot_files' in results
        file_list = results['snapshot_files']
        assert len(file_list) > 0
        first_file = file_list[0]
        assert 'snapshots_id' in first_file
        assert first_file['snapshots_id'] == snapshots[0]['snapshots_id']
        assert 'url' in first_file
        assert first_file['url'].startswith('http')
        assert 'name' in first_file
| mit |
SM-G920P/Hacker_Kernel_SM-G920P | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
	interval_ns = dst - src
	return interval_ns / 1000000.0
# Display a process of transmitting a packet:
# device, length, enqueue timestamp, then the Qdisc->device and
# device->free latencies in msec (columns match the header printed
# by trace_end()).
def print_transmit(hunk):
	# honor the "dev=" command-line filter
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq. A "hunk" is one merged record built by
# handle_irq_softirq_exit(): the hard irqs that raised NET_RX plus the
# events seen while the softirq ran.
def print_receive(hunk):
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	# all timestamps below are printed relative to the first irq entry
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be shown (honor the "dev=" filter)
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	# first the hard irqs, each with any netif_rx events it generated
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
		(diff_msec(base_t, irq_list[i]['irq_ent_t']),
		irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
				(diff_msec(base_t, irq_event['time']),
				irq_event['skbaddr'])
				print PF_JOINT
	# then the NET_RX softirq itself and the events seen inside it
	print PF_SOFT_ENTRY % \
	diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
			(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			# netif_receive_skb, possibly annotated with how the
			# skb ended up: copied to a process, freed, or consumed
			print PF_NET_RECV % \
			(diff_msec(base_t, event['event_t']), event['skbaddr'],
			event['len'])
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
				(diff_msec(base_t, event['comm_t']),
				event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
					(diff_msec(base_t,
					event['comm_t']),
					event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
					diff_msec(base_t,
					event['comm_t'])
	print PF_JOINT
def trace_begin():
	# Parse this script's own options from sys.argv (tx / rx / dev=<name>
	# / debug) into the module-level flags; if neither chart was
	# requested, default to showing both.
	global show_tx
	global show_rx
	global dev
	global debug
	for i in range(len(sys.argv)):
		if i == 0:
			continue
		arg = sys.argv[i]
		if arg == 'tx':
			show_tx = 1
		elif arg =='rx':
			show_rx = 1
		elif arg.find('dev=',0, 4) >= 0:
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
	# Runs once after capture: sort every buffered event by time,
	# dispatch each to its handle_* state machine, then render the
	# rx/tx reports (and buffer statistics when in debug mode).
	# order all events in time
	all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
					    b[EINFO_IDX_TIME]))
	# process all events
	for i in range(len(all_event_list)):
		event_info = all_event_list[i]
		name = event_info[EINFO_IDX_NAME]
		if name == 'irq__softirq_exit':
			handle_irq_softirq_exit(event_info)
		elif name == 'irq__softirq_entry':
			handle_irq_softirq_entry(event_info)
		elif name == 'irq__softirq_raise':
			handle_irq_softirq_raise(event_info)
		elif name == 'irq__irq_handler_entry':
			handle_irq_handler_entry(event_info)
		elif name == 'irq__irq_handler_exit':
			handle_irq_handler_exit(event_info)
		elif name == 'napi__napi_poll':
			handle_napi_poll(event_info)
		elif name == 'net__netif_receive_skb':
			handle_netif_receive_skb(event_info)
		elif name == 'net__netif_rx':
			handle_netif_rx(event_info)
		elif name == 'skb__skb_copy_datagram_iovec':
			handle_skb_copy_datagram_iovec(event_info)
		elif name == 'net__net_dev_queue':
			handle_net_dev_queue(event_info)
		elif name == 'net__net_dev_xmit':
			handle_net_dev_xmit(event_info)
		elif name == 'skb__kfree_skb':
			handle_kfree_skb(event_info)
		elif name == 'skb__consume_skb':
			handle_consume_skb(event_info)
	# display receive hunks
	if show_rx:
		for i in range(len(receive_hunk_list)):
			print_receive(receive_hunk_list[i])
	# display transmit hunks
	if show_tx:
		print "   dev    len      Qdisc        " \
			"       netdevice             free"
		for i in range(len(tx_free_list)):
			print_transmit(tx_free_list[i])
	if debug:
		print "debug buffer status"
		print "----------------------------"
		print "xmit Qdisc:remain:%d overflow:%d" % \
			(len(tx_queue_list), of_count_tx_queue_list)
		print "xmit netdevice:remain:%d overflow:%d" % \
			(len(tx_xmit_list), of_count_tx_xmit_list)
		print "receive:remain:%d overflow:%d" % \
			(len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding tracepoint event.
# Each callback below only timestamps the event (nsecs(sec, nsec)), packs
# its arguments into a tuple (leading fields indexed by the EINFO_IDX_*
# constants) and buffers it in all_event_list; the real processing happens
# later, in trace_end(), after a global sort by time.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
	# NOTE(review): "irq__softirq_entry" is reused to decode vec here (and
	# in irq__softirq_raise below); the softirq vec symbol table appears
	# to be shared across entry/exit/raise, so this looks intentional --
	# confirm against the perf event definitions before changing.
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			irq, irq_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			irq, irq_name)
	all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
	all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			napi, dev_name)
	all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, rc, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc ,dev_name)
	all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, protocol, location):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, protocol, location)
	all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr)
	all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen)
	all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
	# push a new record on this cpu's stack of in-flight hard irqs
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	if cpu not in irq_dic.keys():
		irq_dic[cpu] = []
	irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
	irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
	# pop the matching record and keep it only if it raised NET_RX
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic.keys():
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record.update({'irq_ext_t':time})
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
	# tag the hard irq currently on top of this cpu's stack with a
	# NET_RX 'sirq_raise' event
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'sirq_raise'})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
	# open a fresh NET_RX context for this cpu
	(name, context, cpu, time, pid, comm, vec) = event_info
	net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
	# close the NET_RX context: merge the irqs that raised it with the
	# events seen inside the softirq into one "hunk" for print_receive()
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = []
	event_list = 0
	if cpu in irq_dic.keys():
		irq_list = irq_dic[cpu]
		del irq_dic[cpu]
	if cpu in net_rx_dic.keys():
		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
		event_list = net_rx_dic[cpu]['event_list']
		del net_rx_dic[cpu]
	if irq_list == [] or event_list == 0:
		return
	rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
		    'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
	receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
	# record a napi poll inside the currently-open NET_RX context
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		event_list = net_rx_dic[cpu]['event_list']
		rec_data = {'event_name':'napi_poll',
				'dev':dev_name, 'event_t':time}
		event_list.append(rec_data)
def handle_netif_rx(event_info):
	# attach a netif_rx event to the hard irq on top of this cpu's stack
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
	global of_count_rx_skb_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		rec_data = {'event_name':'netif_receive_skb',
			'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
		event_list = net_rx_dic[cpu]['event_list']
		event_list.append(rec_data)
		# also keep the record for a later match against
		# skb_copy_datagram_iovec / kfree_skb, bounded by
		# buffer_budget (overflow is only counted)
		rx_skb_list.insert(0, rec_data)
		if len(rx_skb_list) > buffer_budget:
			rx_skb_list.pop()
			of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
	# packet entered the Qdisc: remember it with its enqueue time
	global of_count_tx_queue_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
	tx_queue_list.insert(0, skb)
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
	# on successful transmit, move the packet from the Qdisc queue to
	# the device-xmit list, stamping the xmit time
	global of_count_tx_xmit_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
	if rc == 0: # NETDEV_TX_OK
		for i in range(len(tx_queue_list)):
			skb = tx_queue_list[i]
			if skb['skbaddr'] == skbaddr:
				skb['xmit_t'] = time
				tx_xmit_list.insert(0, skb)
				del tx_queue_list[i]
				if len(tx_xmit_list) > buffer_budget:
					tx_xmit_list.pop()
					of_count_tx_xmit_list += 1
				return
def handle_kfree_skb(event_info):
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
	# dropped before reaching the device: forget the queued packet
	for i in range(len(tx_queue_list)):
		skb = tx_queue_list[i]
		if skb['skbaddr'] == skbaddr:
			del tx_queue_list[i]
			return
	# freed after device xmit: the transmit sequence is complete
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
	# otherwise it was a received packet that got dropped; annotate the
	# rx record with who freed it and where
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"kfree_skb",
				'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
def handle_consume_skb(event_info):
	# normal (non-drop) free after transmit completes the tx sequence
	(name, context, cpu, time, pid, comm, skbaddr) = event_info
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
def handle_skb_copy_datagram_iovec(event_info):
	# a process copied the skb to userspace: record who received it
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if skbaddr == rec_data['skbaddr']:
			rec_data.update({'handle':"skb_copy_datagram_iovec",
				'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
| gpl-2.0 |
BeDjango/intef-openedx | common/lib/xmodule/xmodule/textannotation_module.py | 23 | 6794 | """Text annotation module"""
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String
from xmodule.annotator_mixin import get_instructions
from xmodule.annotator_token import retrieve_token
from xblock.fragment import Fragment
import textwrap
# Make '_' a no-op so we can scrape strings for translation. A plain lambda
# is used instead of `django.utils.translation.ugettext_noop` because Django
# cannot be imported in this file; the marked strings are still collected by
# the i18n tooling.
_ = lambda text: text
class AnnotatableFields(object):
    """Fields for `TextModule` and `TextDescriptor`."""
    # Raw XML payload; the <instructions> element is split out from the
    # annotatable content at render time (see TextAnnotationModule.__init__).
    data = String(
        help=_("XML data for the annotation"),
        scope=Scope.content,
        default=textwrap.dedent("""\
            <annotatable>
                <instructions>
                    <p>
                        Add the instructions to the assignment here.
                    </p>
                </instructions>
                <p>
                    Lorem ipsum dolor sit amet, at amet animal petentium nec. Id augue nemore postulant mea. Ex eam dicant noluisse expetenda, alia admodum abhorreant qui et. An ceteros expetenda mea, tale natum ipsum quo no, ut pro paulo alienum noluisse.
                </p>
            </annotatable>
        """))
    display_name = String(
        display_name=_("Display Name"),
        help=_("Display name for this module"),
        scope=Scope.settings,
        default=_('Text Annotation'),
    )
    # comma-separated tag:color pairs used for automatic highlighting
    instructor_tags = String(
        display_name=_("Tags for Assignments"),
        help=_("Add tags that automatically highlight in a certain color using the comma-separated form, i.e. imagery:red,parallelism:blue"),
        scope=Scope.settings,
        default='imagery:red,parallelism:blue',
    )
    source = String(
        display_name=_("Source/Citation"),
        help=_("Optional for citing source of any material used. Automatic citation can be done using <a href=\"http://easybib.com\">EasyBib</a>"),
        scope=Scope.settings,
        default='None',
    )
    diacritics = String(
        display_name=_("Diacritic Marks"),
        help=_("Add diacritic marks to be added to a text using the comma-separated form, i.e. markname;urltomark;baseline,markname2;urltomark2;baseline2"),
        scope=Scope.settings,
        default='',
    )
    # The two settings below are hidden from the Studio editor by
    # TextAnnotationDescriptor.non_editable_metadata_fields.
    annotation_storage_url = String(
        help=_("Location of Annotation backend"),
        scope=Scope.settings,
        default="http://your_annotation_storage.com",
        display_name=_("Url for Annotation Storage")
    )
    annotation_token_secret = String(
        help=_("Secret string for annotation storage"),
        scope=Scope.settings,
        default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
        display_name=_("Secret Token String for Annotation")
    )
    default_tab = String(
        display_name=_("Default Annotations Tab"),
        help=_("Select which tab will be the default in the annotations table: myNotes, Instructor, or Public."),
        scope=Scope.settings,
        default="myNotes",
    )
    # currently only supports one instructor, will build functionality for multiple later
    instructor_email = String(
        display_name=_("Email for 'Instructor' Annotations"),
        help=_("Email of the user that will be attached to all annotations that will be found in 'Instructor' tab."),
        scope=Scope.settings,
        default="",
    )
    annotation_mode = String(
        display_name=_("Mode for Annotation Tool"),
        help=_("Type in number corresponding to following modes: 'instructor' or 'everyone'"),
        scope=Scope.settings,
        default="everyone",
    )
class TextAnnotationModule(AnnotatableFields, XModule):
    ''' Text Annotation Module '''
    js = {'coffee': [],
          'js': []}
    css = {'scss': [resource_string(__name__, 'css/annotatable/display.scss')]}
    icon_class = 'textannotation'
    def __init__(self, *args, **kwargs):
        super(TextAnnotationModule, self).__init__(*args, **kwargs)
        # parse the stored XML once; <instructions> is split out and the
        # remainder becomes the annotatable content
        xmltree = etree.fromstring(self.data)
        self.instructions = self._extract_instructions(xmltree)
        self.content = etree.tostring(xmltree, encoding='unicode')
        self.user_email = ""
        self.is_course_staff = False
        if self.runtime.get_user_role() in ['instructor', 'staff']:
            self.is_course_staff = True
        # get_real_user is None in Studio, where anonymous ids do not map
        # to real users
        if self.runtime.get_real_user is not None:
            try:
                self.user_email = self.runtime.get_real_user(self.runtime.anonymous_student_id).email
            except Exception: # pylint: disable=broad-except
                # best-effort: fall back to a translated placeholder rather
                # than failing module construction
                self.user_email = _("No email address found.")
    def _extract_instructions(self, xmltree):
        """ Removes <instructions> from the xmltree and returns them as a string, otherwise None. """
        return get_instructions(xmltree)
    def student_view(self, context):
        """ Renders parameters to template. """
        context = {
            'course_key': self.runtime.course_id,
            'display_name': self.display_name_with_default,
            'tag': self.instructor_tags,
            'source': self.source,
            'instructions_html': self.instructions,
            'content_html': self.content,
            # short-lived auth token for the external annotation backend
            'token': retrieve_token(self.user_email, self.annotation_token_secret),
            'diacritic_marks': self.diacritics,
            'annotation_storage': self.annotation_storage_url,
            'default_tab': self.default_tab,
            'instructor_email': self.instructor_email,
            'annotation_mode': self.annotation_mode,
            'is_course_staff': self.is_course_staff,
        }
        fragment = Fragment(self.system.render_template('textannotation.html', context))
        # TinyMCE already exists in Studio so we should not load the files again
        # get_real_user always returns "None" in Studio since its runtimes contains no anonymous ids
        if self.runtime.get_real_user is not None:
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/tinymce.full.min.js")
            fragment.add_javascript_url(self.runtime.STATIC_URL + "js/vendor/tinymce/js/tinymce/jquery.tinymce.min.js")
        return fragment
class TextAnnotationDescriptor(AnnotatableFields, RawDescriptor):
    """Studio-side descriptor for the text annotation module."""
    module_class = TextAnnotationModule
    mako_template = "widgets/raw-edit.html"
    @property
    def non_editable_metadata_fields(self):
        """Hide the annotation-storage settings from the Studio editor."""
        hidden_fields = super(TextAnnotationDescriptor, self).non_editable_metadata_fields
        hidden_fields.append(TextAnnotationDescriptor.annotation_storage_url)
        hidden_fields.append(TextAnnotationDescriptor.annotation_token_secret)
        return hidden_fields
| agpl-3.0 |
jirikuncar/invenio | invenio/legacy/bibknowledge/templates.py | 13 | 33921 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for BibKnowledge administration."""
__revision__ = ""
# non Invenio imports
import os
import cgi
# Invenio imports
from invenio.base.i18n import gettext_set_language
from invenio.config import CFG_SITE_URL, CFG_WEBDIR
MAX_MAPPINGS = 100 # show max this number of mappings on one page
class Template:
"""Templating class."""
def tmpl_admin_kbs_management(self, ln, kbs, lookup_term=""):
    """
    Returns the main management console for knowledge bases (shows a list of them).
    @param ln: language
    @param kbs: a list of dictionaries with knowledge bases attributes
    @param lookup_term: hunt for this string in kb's
    @return main management console as html
    """
    _ = gettext_set_language(ln) # load the right message language
    #top of the page and table header
    # Pre-escape the user-supplied search term (quote=1 makes it safe inside
    # a double-quoted attribute value) before embedding it in the search box.
    searchforaterm_field = '<input type="text" name="search" value="%s" />' % cgi.escape(lookup_term, 1)
    searchforaterm = _(
        "Limit display to knowledge bases matching %(keyword_field)s in "
        "their rules and descriptions", keyword_field=searchforaterm_field
    )
    out = '''
<!--make a search box-->
<table class="admin_wvar" cellspacing="0">
<tr><td>
<form action="kb">
%(searchforaterm)s
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="descriptiontoo" value="1" />
<input type="submit" class="adminbutton" value="%(search_button)s">
</form>
</td></tr></table>
<table class="admin_wvar" width="95%%" cellspacing="0">
<tr>
<th class="adminheaderleft" > </th>
<th class="adminheaderleft" >%(name)s</th>
<th class="adminheaderleft" >%(description)s</th>
<th class="adminheadercenter" >%(action)s
[<a href="%(siteurl)s/help/admin/bibknowledge-admin-guide#admin">?</a>]
</th>
</tr>''' % {'ln': ln,
            'search': lookup_term,
            'searchforaterm': searchforaterm,
            'siteurl': CFG_SITE_URL,
            'name': _("Name"),
            'description': _("Description"),
            'action': _("Action"),
            'search_button': _("Search")}
    #table content: kb names, description and actions
    if len(kbs) == 0:
        out += '''<tr>
<td colspan="5" class="admintd" align="center"><em>%s</em></td>
</tr>''' % _("No Knowledge Base")
    else:
        line = 0
        for kb_attributes in kbs :
            # NOTE(review): the kb dicts are mutated in place here (style,
            # ln, siteurl, ... keys are added) — callers should not reuse them.
            kb_attributes['style'] = ""
            if line % 2:
                # zebra-striping for every other row
                kb_attributes['style'] = 'background-color: rgb(235, 247, 255);'
            line += 1
            kb_attributes['ln'] = ln
            kb_attributes['siteurl'] = CFG_SITE_URL
            kb_attributes['search'] = cgi.escape(lookup_term, 1)
            kb_attributes['name'] = cgi.escape(kb_attributes['name'])
            kb_attributes['description'] = cgi.escape(kb_attributes['description'])
            kb_attributes['delete'] = _("Delete")
            # NOTE(review): the delete form uses type="POST", which is not a
            # valid <form> attribute (the author probably meant method="post");
            # as written the browser submits it as a GET — confirm the handler
            # expects that before changing it.
            row_content = '''<tr>
<td class="admintdright" style="vertical-align: middle; %(style)s"> </td>
<td class="admintdleft" style="vertical-align: middle; %(style)s white-space: nowrap;">
<a href="kb?ln=%(ln)s&kb=%(id)s&search=%(search)s">%(name)s</a></td>
<td class="admintdleft"style="vertical-align: middle; %(style)s">%(description)s</td>
<td class="admintd" style="vertical-align: middle; %(style)s white-space: nowrap;">
<form action="kb?ln=%(ln)s" type="POST">
<input type="submit" class="adminbutton" value="%(delete)s">
<input type="hidden" id="kb" name="kb" value="%(id)s">
<input type="hidden" id="action" name="action" value="delete">
</form>
</td>
</tr>
''' % kb_attributes
            out += row_content
    #table footer, buttons and bottom of the page
    out += ''' </table>
<table align="center" width="95%">
<tr>
<td align="left" valign="top"> </td>
'''
    out += '''
<td align="left">
<form action="kb">
<input type="hidden" name="action" value="new" />
<input type="hidden" name="ln" value="%(ln)s" />
<input class="adminbutton" type="submit" value="%(add_new)s" />
</form>
</td>
<td align="right">
<form method="post" action="kb?ln=%(ln)s&action=new&kbtype=dynamic">
<input class="adminbutton" type="submit" value="%(config_dyn)s" />
</form>
</td>
<td align="right">
<form method="post" action="kb?action=new&kbtype=taxonomy&ln=%(ln)s">
<input class="adminbutton" type="submit" value="%(add_tax)s" />
</form>
</td>
</tr>
</table>''' % {'ln': ln, 'add_new': _("Add New Knowledge Base"),
               'config_dyn': _("Configure a dynamic KB"),
               'add_tax': _("Add New Taxonomy") }
    return out
def tmpl_kb_prevnextlink(self, ln, p_or_n, kb_id, sortby, startat):
    """
    Build the "Previous"/"Next" paging link for the mappings table.
    @param ln: language
    @param p_or_n: 'p' for previous, 'n' for next
    @param kb_id: knowledge base id
    @param sortby: sort by 'to' or 'from'
    @param startat: index of the first pair on the target page
    @return anchor element as html
    """
    _ = gettext_set_language(ln)  # load the right message language
    label = _("Previous") if p_or_n == 'p' else _("Next")
    href = ('kb?ln=%(ln)s&kb=%(kb_id)s&'
            'sortby=%(sortby)s&startat=%(start)s') % {'ln': ln,
                                                      'kb_id': kb_id,
                                                      'sortby': sortby,
                                                      'start': str(startat)}
    return '<a href="' + href + '">' + label + '</a>'
def tmpl_admin_show_taxonomy(self, ln, kb_id, kb_name):
    """
    An auxiliary method used by tmpl_admin_kb_show in order to make a form to upload an ref file.
    @param ln: language
    @param kb_id: knowledge base id
    @param kb_name: knowledge base name
    @return upload form (plus current-file info if one exists) as html
    """
    _ = gettext_set_language(ln) # load the right message language
    #check if this kb already has a file associated with it
    #it would be named CFG_WEBDIR+"/kbfiles/"+kb_id+".rdf"
    rdfname = CFG_WEBDIR+"/kbfiles/"+str(kb_id)+".rdf"
    webname = CFG_SITE_URL+"/kb/export?kbname="+cgi.escape(kb_name, 1)
    out = ""
    if os.path.isfile(rdfname):
        # An RDF file is already uploaded: warn that a new upload replaces it
        # and show the public export URL of the current taxonomy.
        out += _("This knowledge base already has a taxonomy file.")+" "
        out += _("If you upload another file, the current version will be replaced.")
        out += "<br/>"
        out += _("The current taxonomy can be accessed with this URL: %(x_url)s", x_url=('<a href="'+webname+'">'+webname+"</a>"))
    else:
        out += _("Please upload the RDF file for taxonomy %(x_name)s", x_name=cgi.escape(kb_name))
    # NOTE(review): the hidden kb field below contains a stray comma
    # (name="kb",) in its markup — browsers tolerate it, but it looks
    # unintended; confirm before cleaning up.
    out += """
<br/>
<!-- enctype="multipart/form-data"-->
<form method="post" action="kb/upload" name="upload" enctype="multipart/form-data">
<input style="display:none;" name="kb", value="%(kb_id)s"/>
<input type="file" name="file"/>
<input type="submit" name="submit" value="%(upload)s" class="adminbutton"/>
</form>
""" % {'kb_id': kb_id,
       'upload': _("Upload")}
    return out
def tmpl_admin_dynamic_kb(self, ln, kb_id, dyn_config=None, collections=None, exportstr=""):
    """
    An auxiliary method used by tmpl_admin_kb_show in order to configure a dynamic (collection based) kb.
    @param ln: language
    @param kb_id: the id of the kb
    @param dyn_config: a dictionary with keys: field, expression, collection (or None)
    @param collections: a list of collection names (or None)
    @param exportstr: a string to print about exporting
    @return configuration form as html
    """
    _ = gettext_set_language(ln)  # load the right message language
    # Bugfix: both parameters default to None, but the original code did
    # `'field' in dyn_config` and iterated over `collections`, which raises
    # TypeError when the defaults are used. Normalise them first.
    dyn_config = dyn_config or {}
    collections = collections or []
    expression = ""
    field = ""
    collection = ""
    if 'field' in dyn_config:
        field = dyn_config['field']
    if 'expression' in dyn_config:
        expression = dyn_config['expression']
    if 'collection' in dyn_config and dyn_config['collection']:
        collection = dyn_config['collection']
    # Explanatory text shown above the configuration form.
    pleaseconf = _("Please configure")+"<P>"
    pleaseconf += _("A dynamic knowledge base is a list of values of a \
given field. The list is generated dynamically by \
searching the records using a search expression.")
    pleaseconf += "<br/>"
    pleaseconf += _("Example: Your records contain field 270__a for \
the name and address of the author's institute. \
If you set the field to '270__a' and the expression \
to '270__a:*Paris*', a list of institutes in Paris \
will be created.")+"<br/>"
    pleaseconf += _("If the expression is empty, a list of all values \
in 270__a will be created.")+"<br/>"
    pleaseconf += _("If the expression contains '%%', like '270__a:*%%*', \
it will be replaced by a search string when the \
knowledge base is used.")+"<br/><br/>"
    pleaseconf += _("You can enter a collection name if the expression \
should be evaluated in a specific collection.")+"<br/><br/>"
    pleaseconf += _("Example 1: Your records contain field 270__a for \
the name and address of the author's institute. \
If you set the field to '270__a' and the expression \
to '270__a:*Paris*', a list of institutes in Paris \
will be created.")+"<br/><br/>"
    pleaseconf += _("Example 2: Return the institute's name (100__a) when the \
user gives its postal code (270__a): \
Set field to 100__a, expression to 270__a:*%%*.")+"<br/><br/>"
    #create a pretty select box; pre-select the currently configured collection
    selectbox = "<select name=\"collection\"><option value=\""+ _("Any collection") +"\">"+_("Any collection")+"</option>"
    for mycoll in collections:
        selectbox += "<option value=\""+mycoll+"\""
        if mycoll == collection:
            selectbox += " selected=\"1\" "
        selectbox += ">"+mycoll+"</option>"
    selectbox += "</select>"
    pleaseconf += '''<form action="kb">
Field: <input name="field" value="%(field)s"/>
Expression: <input name="expression" value="%(expression)s"/>
Collection: %(selectbox)s
<input type="hidden" name="action" value="dynamic_update"/>
<input type="hidden" name="ln" value="%(ln)s"/>
<input type="hidden" name="kb" value="%(kb_id)s"/>
<input type="submit" name="submit" value="%(save)s" class="adminbutton"/>
</form>''' % { 'kb_id': kb_id,
               'expression': cgi.escape(expression, 1),
               'field': cgi.escape(field, 1),
               'collection': cgi.escape(collection, 1),
               'selectbox': selectbox, 'ln': ln ,
               'save': _("Save")}
    # Only mention the export URL once the kb is at least partly configured.
    if field or expression:
        pleaseconf += "<p>"+_("Exporting: ")
        pleaseconf += "<a href=\""+exportstr+"\">"+exportstr+"</a><br/>"
    return pleaseconf
def tmpl_admin_kb_show(self, ln, kb_id, kb_name, mappings,
                       sortby, startat=0, kb_type=None,
                       lookup_term="", dyn_config=None, collections=None):
    """
    Returns the content of a knowledge base.
    @param ln: language
    @param kb_id: the id of the kb
    @param kb_name: the name of the kb
    @param mappings: a list of dictionaries with mappings
    @param sortby: the sorting criteria ('from' or 'to')
    @param startat: start showing the mappings from number x. Usefull for large kb's.
    @param kb_type: None or 't' meaning taxonomy, or 'd' meaning a dynamic kb.
    @param lookup_term: focus on this left side if it is in the KB
    @param dyn_config: configuration for dynamic kb's
    @param collections: a list of collections names (will be needed by dyn kb)
    @return main management console as html
    """
    _ = gettext_set_language(ln) # load the right message language
    #top of the page and main table that split screen in two parts
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="kb?ln=%(ln)s&\
sortby=%(sortby)s">%(close)s</a></small> </td>
<td>1. <small>%(mappings)s</small> </td>
<td>2. <small><a href="kb?ln=%(ln)s&\
action=attributes&kb=%(kb_id)s&sortby=%(sortby)s">%(attributes)s</a>
</small> </td>
</tr>
</table> ''' % {'ln':ln,
                'kb_id':kb_id,
                'sortby':sortby,
                'close': _("Close Editor"),
                'mappings': _("Knowledge Base Mappings"),
                'attributes':_("Knowledge Base Attributes"),
                'dependencies':_("Knowledge Base Dependencies"),
                'menu': _("Menu")}
    #Define some constants
    # startat comes from the query string, so it may not be numeric.
    try:
        startati = int(startat)
    except ValueError:
        startati = 0
    #to note about exporting..
    export = CFG_SITE_URL+"/kb/export?kbname="+cgi.escape(kb_name, 1)
    if kb_type == 'd':
        #it's a dynamic kb. Create a config form.
        return self.tmpl_admin_dynamic_kb(ln, kb_id, dyn_config, collections, export)
    if kb_type == 't':
        #it's a taxonomy (ontology). Show a dialog to upload it.
        return self.tmpl_admin_show_taxonomy(ln, kb_id, kb_name)
    # From here on: a plain mapping-table kb.
    hereyoucan = _("Here you can add new mappings to this base \
and change the base attributes.")
    out += '<p>'+hereyoucan+'<table width="100%" align="center"><tr>'
    #First column of table: add mapping form
    out += '''
<td width="300" valign="top">
<form name="addNewMapping"
action="kb?ln=%(ln)s&action=add_mapping&kb=%(kb_id)s&\
sortby=%(sortby)s&forcetype=no&kb_type=%(kb_type)s"
method="post">''' % {'ln':ln, 'kb_id':kb_id,
                     'sortby':sortby, 'kb_type': kb_type}
    mapfromstring = _("Map From")
    maptostring = _("To")
    out += '''
<table class="admin_wvar" width="100%%" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">
Add New Mapping
[<a href="%(siteurl)s/help/admin/bibknowledge-admin-guide#admin">?</a>]
</th>
</tr>
<tr>
<td class="admintdright">
<label for="mapFrom">
<span style="white-space: nowrap;">%(mapfrom)s</span>
</label>: </td>
<td><input tabindex="1" name="mapFrom" type="text"
id="mapFrom" size="25"/>
</td>
</tr>
<tr>
<td class="admintdright"><label for="mapTo">%(mapto)s</label>:
</td>
<td><input tabindex="2" name="mapTo" type="text" id="mapTo" size="25"/>
</td>
</tr>
<tr>
<td colspan="2" align="right"><input tabindex="3"
class="adminbutton" type="submit" value="Add new Mapping"/></td>
</tr>
</table>
</form>
<!--add a search box -->
<form name="kb">
<table class="admin_wvar" width="100%%" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">%(searchforamapping)s</th>
</tr>
<tr>
<td class="admintdright"><span style="white-space: nowrap;">%(search)s
</span></label>: </td>
<td><input name="search" type="text" value="%(lookup_term)s" size="25"/>
</td>
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="kb" value="%(kb_id)s"/>
</tr>
<td colspan="2" align="right">
<input class="adminbutton" type="submit" value="%(search)s"/>
</td>
</tr>
</table>
</form>
</td>
''' % {'siteurl':CFG_SITE_URL,
       'mapfrom': mapfromstring, 'mapto': maptostring,
       'search': _("Search"), 'ln': ln, 'kb_id':kb_id,
       'lookup_term': cgi.escape(lookup_term, 1),
       'searchforamapping': _("Search for a mapping") }
    #calculate if prev/next are needed
    #add prev/next buttons if needed
    prevlink = ""
    nextlink = ""
    if startati > 0:
        # there is a previous page; clamp its start index at 0
        newstart = startati-MAX_MAPPINGS
        if newstart < 0:
            newstart = 0
        prevlink = self.tmpl_kb_prevnextlink(ln, 'p', kb_id, sortby, newstart)
    if len(mappings) > startati+MAX_MAPPINGS:
        #all of them were not shown yet
        newstart = startati+MAX_MAPPINGS
        nextlink = self.tmpl_kb_prevnextlink(ln, 'n', kb_id, sortby, newstart)
    #Second column: mappings table
    #header and footer
    out += '''
<td valign="top">
<table class="admin_wvar">
<thead>
<!--prev/next-->
<tr>
<td>%(prevlink)s</td><td>%(nextlink)s</td>
</tr>
<tr>
<th class="adminheaderleft" width="25"> </th>
<th class="adminheaderleft" width="34%%"><a href="kb?ln=%(ln)s&kb=%(kb_id)s&sortby=from">%(mapfrom)s</a></th>
<th class="adminheaderleft"> </th>
<th class="adminheaderleft" width="34%%"><a href="kb?ln=%(ln)s&kb=%(kb_id)s&sortby=to">%(mapto)s</a></th>
<th class="adminheadercenter" width="25%%">Action [<a href="%(siteurl)s/help/admin/bibknowledge-admin-guide#admin">?</a>]</th>
</tr>
</thead>
<tfoot>
<tr>
<td colspan="5"> </td>
</tr>
</tfoot>
<tbody>
''' % {'ln':ln,
       'kb_id':kb_id,
       'siteurl':CFG_SITE_URL,
       'mapfrom': cgi.escape(mapfromstring, 1), 'mapto': cgi.escape(maptostring, 1),
       'prevlink': prevlink, 'nextlink': nextlink }
    #table content: key, value and actions
    if len(mappings) == 0:
        out += '''
<tr>
<td colspan="5" class="admintd" align="center"><em>%s</em></td>
</tr></tbody>''' % _("Knowledge base is empty")
    else:
        line = 0
        # tab indexes continue from the "add mapping" form above (1..3) and
        # the search box; each rendered row consumes three more slots.
        tabindex_key = 6
        tabindex_value = 7
        tabindex_save_button = 8
        mnum = 0 #current iteration in mappings
        for mapping in mappings:
            #roll to startat
            mnum += 1
            # only render the mappings belonging to the current page
            if mnum > startati and mnum <= startati+MAX_MAPPINGS:
                style = "vertical-align: middle;"
                if line % 2:
                    style += 'background-color: rgb(235, 247, 255);'
                line += 1
                tabindex_key += 3
                tabindex_value += 3
                tabindex_save_button += 3
                # NOTE(review): the raw mapping key is also used as the form
                # name= attribute below — confirm keys never contain quotes.
                row_content = '''
<tr>
<td colspan="5">
<form action="kb?action=edit_mapping&ln=%(ln)s&kb=%(kb_id)s&sortby=%(sortby)s" name="%(key)s" method="post">
<table>
<tr>
<td class="admintdright" style="%(style)s" width="5">
<input type="hidden" name="key" value="%(key)s"/>
</td>
<td class="admintdleft" style="%(style)s">
<input type="text" name="mapFrom" size="30" maxlength="255" value="%(key)s" tabindex="%(tabindex_key)s"/>
</td>
<td class="admintdleft" style="%(style)s white-space: nowrap;" width="5">=></td>
<td class="admintdleft"style="%(style)s">
<input type="text" name="mapTo" size="30" value="%(value)s" tabindex="%(tabindex_value)s">
</td>
<td class="admintd" style="%(style)s white-space: nowrap;">
<input class="adminbutton" type="submit" name="save_mapping" value="%(save)s" tabindex="%(tabindex_save_button)s"/>
<input class="adminbutton" type="submit" name="delete_mapping" value="%(delete)s"/></td>
</tr></table></form></td></tr>
''' % {'key': cgi.escape(mapping['key'], 1),
       'value':cgi.escape(mapping['value'], 1),
       'ln':ln,
       'style':style,
       'tabindex_key': tabindex_key,
       'tabindex_value': tabindex_value,
       'tabindex_save_button': tabindex_save_button,
       'kb_id':kb_id,
       'sortby':sortby,
       'save': _("Save"),
       'delete': _("Delete")}
                out += row_content
    #End of table
    out += '</tbody></table>'
    out += prevlink+" "+nextlink
    out += '</td>'
    out += '''
<td width="20%"> </td>
</tr>
</table>
'''
    #add a note about exporting
    out += "<p>"+_("You can get a these mappings in textual format by: ")
    out += "<a href=\""+export+"\">"+export+"</a><br/>"
    out += _("And the KBA version by:")+" "
    export = export+"&format=kba"
    out += "<a href=\""+export+"\">"+export+"</a><br/>"
    #add script that will put focus on first field of "add mapping" form
    out += '''
<script type="text/javascript">
self.focus();document.addNewMapping.mapFrom.focus()
</script>
'''
    return out
def tmpl_admin_kb_show_attributes(self, ln, kb_id, kb_name, description, sortby, kb_type=None):
    """
    Returns the attributes of a knowledge base.
    @param ln: language
    @param kb_id: the id of the kb
    @param kb_name: the name of the kb
    @param description: the description of the kb
    @param sortby: the sorting criteria ('from' or 'to')
    @param kb_type: None or taxonomy
    @return main management console as html
    """
    _ = gettext_set_language(ln) # load the right message language
    # Editor menu bar (same layout as tmpl_admin_kb_show, with the
    # "attributes" entry rendered as plain text since it is the current tab).
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="kb?ln=%(ln)s&sortby=%(sortby)s">%(close)s</a></small> </td>
<td>1. <small><a href="kb?ln=%(ln)s&kb=%(kb_id)s&sortby=%(sortby)s">%(mappings)s</a></small> </td>
<td>2. <small>%(attributes)s</small> </td>
</tr>
</table> ''' % {'ln':ln,
                'kb_id':kb_id,
                'sortby':sortby,
                'close': _("Close Editor"),
                'menu': _("Menu"),
                'mappings': _("Knowledge Base Mappings"),
                'attributes':_("Knowledge Base Attributes"),
                'dependencies':_("Knowledge Base Dependencies")}
    out += '''
<form name="updateAttributes"
action="kb?ln=%(ln)s&action=update_attributes&kb=%(kb_id)s&sortby=%(sortby)s&kb_type=%(kb_type)s" method="post">
<table class="admin_wvar" cellspacing="0">
<tr>
''' % {'ln':ln,
       'kb_id':kb_id,
       'sortby':sortby,
       'kb_type':kb_type}
    # NOTE(review): kb_name is interpolated here WITHOUT cgi.escape (unlike
    # the input value below) — potential markup injection; confirm kb names
    # are sanitised upstream.
    out += '''
<th colspan="2" class="adminheaderleft">%(kb_name)s attributes [<a href="%(siteurl)s/help/admin/bibknowledge-admin-guide#admin">?</a>]</th>''' % {'kb_name': kb_name,
                                                                                                                                                  'siteurl': CFG_SITE_URL}
    out += '''
</tr>
<tr>
<td class="admintdright">
<input type="hidden" name="key" value="%(kb_id)s"/>
<label for="name">Name</label>: </td>
<td><input tabindex="4" name="name" type="text" id="name" size="25" value="%(kb_name)s"/></td>
</tr>
<tr>
<td class="admintdright" valign="top"><label for="description">Description</label>: </td>
<td><textarea tabindex="5" name="description" id="description" rows="4" cols="25">%(kb_description)s</textarea> </td>
</tr>
<tr>
<td> </td>
<td align="right"><input tabindex="6" class="adminbutton" type="submit" value="%(update_base_attributes)s"/></td>
</tr>
</table>
</form></td>''' % {'kb_name': cgi.escape(kb_name, 1),
                   'kb_description': cgi.escape(description, 1),
                   'kb_id':kb_id,
                   'update_base_attributes':_("Update Base Attributes")}
    return out
def tmpl_admin_kb_show_dependencies(self, ln, kb_id, kb_name, sortby, format_elements):
    """
    Returns the attributes of a knowledge base.
    @param ln: language
    @param kb_id: the id of the kb
    @param kb_name: the name of the kb
    @param sortby: the sorting criteria ('from' or 'to')
    @param format_elements: the elements that use this kb
    @return dependencies page as html
    """
    _ = gettext_set_language(ln) # load the right message language
    # Editor menu bar; this page has no numbered entry of its own.
    out = '''
<table class="admin_wvar" cellspacing="0">
<tr><th colspan="4" class="adminheaderleft">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="kb?ln=%(ln)s&sortby=%(sortby)s">%(close)s</a></small> </td>
<td>1. <small><a href="kb?ln=%(ln)s&kb=%(kb_id)s&sortby=%(sortby)s">%(mappings)s</a></small> </td>
<td>2. <small><a href="kb?ln=%(ln)s&action=attributes&kb=%(kb_id)s&sortby=%(sortby)s">%(attributes)s</a></small> </td>
</tr>
</table> <br/>''' % {'ln':ln,
                     'kb_id':kb_id,
                     'sortby':sortby,
                     'close': _("Close Editor"),
                     'menu' : _("Menu"),
                     'mappings': _("Knowledge Base Mappings"),
                     'attributes':_("Knowledge Base Attributes"),
                     'dependencies':_("Knowledge Base Dependencies")}
    out += ''' <table width="90%" class="admin_wvar" cellspacing="0"><tr>'''
    out += '''
<th class="adminheaderleft">Format Elements used by %(name)s*</th>
</tr>
<tr>
<td valign="top"> ''' % {"name": kb_name}
    if len(format_elements) == 0:
        out += '<p align="center"><i>%s</i></p>' % \
               _("This knowledge base is not used in any format elements.")
    # One documentation link per format element: the anchor is the
    # upper-cased element name, the link text its bfe_-prefixed lower case.
    for format_element in format_elements:
        name = format_element['name']
        out += '''<a href="format_elements_doc?ln=%(ln)s#%(anchor)s">%(name)s</a><br/>''' % {'name':"bfe_"+name.lower(),
                                                                                             'anchor':name.upper(),
                                                                                             'ln':ln}
    out += '''
</td>
</tr>
</table>
<b>*Note</b>: Some knowledge base usages might not be shown. Check manually.
'''
    return out
def tmpl_select_rule_action(self, ln, kbid, left, right, leftorright, current, dicts):
    """
    Returns a form of actions for the user to decide what to do
    if there are overlapping rules.
    @param ln: language
    @param kbid: knowledge base id
    @param left: mapFrom side of current rule
    @param right: mapTo side of current rule
    @param leftorright: "left" or "right"
    @param current: name of the knowledge base the rule is being added to
    @param dicts: an array of mapping dictionaries with 'kbname', 'key', 'value'
    @return decision form as html
    """
    _ = gettext_set_language(ln)  # load the right message language
    # Show the rule being added. Bugfix: the right-hand value used to be
    # closed with a stray </pre> although it was opened with <code>.
    gen = _("Your rule: %(x_rule)s",
            x_rule=(' <code style="border:1px solid #999">'+cgi.escape(left)+'</code> => <code style="border:1px solid #999">'+cgi.escape(right)+"</code><p>"))
    if (leftorright=='left'):
        gen += _("The left side of the rule (%(x_rule)s) already appears in these knowledge bases:",
                 x_rule=('<code>' + cgi.escape(left) + '</code>'))
    else:
        gen += _("The right side of the rule (%(x_rule)s) already appears in these knowledge bases:",
                 x_rule=('<code>' + cgi.escape(right) + '</code>'))
    gen += "<br/>"
    # Collect the distinct kb names among the conflicting mappings, and
    # detect the one combination that must be refused outright.
    inkbs = []
    dontdoit = False
    for d in dicts:
        kb = d['kbname']
        if kb == current and leftorright == 'left':
            dontdoit = True
            #two rules with same left side in the same kb? no.
        if inkbs.count(kb)==0:
            inkbs.append(kb)
    kbstr = ", ".join(['<b>%s</b>' % cgi.escape(inkb) for inkb in inkbs])
    gen += kbstr
    message = _("Please select action")
    optreplace = _("Replace the selected rules with this rule")
    optadd = _("Add this rule in the current knowledge base")+" ("+cgi.escape(current, 1)+")"
    optcancel = _("Cancel: do not add this rule")
    # Option 1: replace selected conflicting rules everywhere (forcetype=all).
    formreplace = '''<form action="kb?action=add_mapping&ln=%(ln)s&kb=%(kb_id)s&forcetype=all"
method="post">
<input type="hidden" name="mapFrom" value="%(left)s"/>
<input type="hidden" name="mapTo" value="%(right)s"/>
''' % { 'ln':ln, 'kb_id':kbid, 'left':cgi.escape(left, 1), 'right':cgi.escape(right, 1) }
    #make a selectable list of kb's where to push the value..
    # Each checkbox value packs kbname/key/value with a "++++" separator so
    # the handler can identify exactly which rule to replace.
    for d in dicts:
        kb = d['kbname']
        l = d['key']
        r = d['value']
        value = cgi.escape(kb, 1)+"++++"+cgi.escape(l, 1)+"++++"+cgi.escape(r, 1)
        formreplace += '<input type="checkbox" name="replacements" value="'+value+'" />' + \
                       '<b>' + cgi.escape(kb) + '</b>: <code style="border:1px solid #999">' + \
                       cgi.escape(l) + '</code> => <code style="border:1px solid #999">' + cgi.escape(r) + "</code><br/>"
    formreplace += ''' <input class="adminbutton"
type="submit" value="%(opt)s"/></form>''' % { 'opt':optreplace }
    # Option 2: add the rule only to the current kb (forcetype=curr).
    formadd = '''<form action="kb?action=add_mapping&ln=%(ln)s&kb=%(kb_id)s&forcetype=curr" method="post">
<input type="hidden" name="mapFrom" value="%(left)s"/>
<input type="hidden" name="mapTo" value="%(right)s"/>
<input class="adminbutton"
type="submit" value="%(opt)s"/></form>''' % { 'opt':optadd, 'ln':ln,
                                              'kb_id':kbid,
                                              'left':cgi.escape(left, 1), 'right':cgi.escape(right, 1) }
    # Option 3: cancel and go back to the kb view.
    formcancel = '''<form action="kb?ln=%(ln)s&kb=%(kb_id)s">
<input type="hidden" name="kb" value="%(kb_id)s">
<input class="adminbutton"
type="submit" value="%(opt)s"/></form>''' % { 'ln': ln, 'kb_id':kbid, 'opt':optcancel }
    if dontdoit:
        # Duplicate left side in the same kb: replace the "add" option with
        # an explanation of why it is forbidden.
        formadd = _("It is not possible to have two rules with the same left side in the same knowledge base.")+"<p>"
    out = gen+"<p>"+message+"<p>"+formadd+formcancel+"<p><p><p>"+formreplace
    return out
| gpl-2.0 |
robwilkerson/BitBucket-api | bitbucket/issue_comment.py | 2 | 4234 | # -*- coding: utf-8 -*-
URLS = {
    # Issue comments
    'GET_COMMENTS': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/',
    'GET_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
    'CREATE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/',
    'UPDATE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
    'DELETE_COMMENT': 'repositories/%(username)s/%(repo_slug)s/issues/%(issue_id)s/comments/%(comment_id)s/',
}


class IssueComment(object):
    """Comment-level operations bound to a single Bitbucket issue.

    Every method resolves its owner / repository / issue arguments against
    the bound issue's defaults when they are not given explicitly.
    """

    def __init__(self, issue):
        # Keep the parent issue and share its authenticated client; register
        # the comment endpoints on the shared URL table.
        self.issue = issue
        self.bitbucket = self.issue.bitbucket
        self.bitbucket.URLS.update(URLS)
        self.issue_id = issue.issue_id

    def all(self, issue_id=None, repo_slug=None, owner=None):
        """Return every comment attached to the issue."""
        endpoint = self.bitbucket.url(
            'GET_COMMENTS',
            username=owner or self.bitbucket.username,
            repo_slug=repo_slug or self.bitbucket.repo_slug or '',
            issue_id=issue_id or self.issue_id)
        return self.bitbucket.dispatch('GET', endpoint, auth=self.bitbucket.auth)

    def get(self, comment_id, issue_id=None, repo_slug=None, owner=None):
        """Return the single comment identified by ``comment_id``."""
        endpoint = self.bitbucket.url(
            'GET_COMMENT',
            username=owner or self.bitbucket.username,
            repo_slug=repo_slug or self.bitbucket.repo_slug or '',
            issue_id=issue_id or self.issue_id,
            comment_id=comment_id)
        return self.bitbucket.dispatch('GET', endpoint, auth=self.bitbucket.auth)

    def create(self, issue_id=None, repo_slug=None, owner=None, **kwargs):
        """Post a new comment on the issue.

        Only the ``content`` data field is required; the service fills in
        the remaining fields automatically.
        """
        endpoint = self.bitbucket.url(
            'CREATE_COMMENT',
            username=owner or self.bitbucket.username,
            repo_slug=repo_slug or self.bitbucket.repo_slug or '',
            issue_id=issue_id or self.issue_id)
        return self.bitbucket.dispatch('POST', endpoint, auth=self.bitbucket.auth, **kwargs)

    def update(self, comment_id, issue_id=None, repo_slug=None, owner=None, **kwargs):
        """Overwrite an existing comment.

        Only the ``content`` data field is required; the service fills in
        the remaining fields automatically.
        """
        endpoint = self.bitbucket.url(
            'UPDATE_COMMENT',
            username=owner or self.bitbucket.username,
            repo_slug=repo_slug or self.bitbucket.repo_slug or '',
            issue_id=issue_id or self.issue_id,
            comment_id=comment_id)
        return self.bitbucket.dispatch('PUT', endpoint, auth=self.bitbucket.auth, **kwargs)

    def delete(self, comment_id, issue_id=None, repo_slug=None, owner=None):
        """Remove a comment from the issue."""
        endpoint = self.bitbucket.url(
            'DELETE_COMMENT',
            username=owner or self.bitbucket.username,
            repo_slug=repo_slug or self.bitbucket.repo_slug or '',
            issue_id=issue_id or self.issue_id,
            comment_id=comment_id)
        return self.bitbucket.dispatch('DELETE', endpoint, auth=self.bitbucket.auth)
| isc |
apark263/tensorflow | tensorflow/python/ops/nn_ops.py | 2 | 167463 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,  # pylint: disable=redefined-builtin
    strides=None,
    name=None):
  """Computes sums of N-D convolutions (actually cross correlation).

  It is required that 1 <= N <= 3. This is the non-dilated primitive used by
  the more generic `convolution` function, which extends this interface with
  a `dilation_rate` parameter.

  Args:
    input: Rank N+2 tensor of type T, shaped
      `[batch_size] + input_spatial_shape + [in_channels]` when `data_format`
      does not start with `"NC"`, or
      `[batch_size, in_channels] + input_spatial_shape` when it does.
    filter: Rank N+2 tensor of type T of shape
      `filter_spatial_shape + [in_channels, out_channels]`. The rank of
      either `input` or `filter` must be statically known.
    padding: Padding method to use, must be either "VALID" or "SAME".
    data_format: A string or None. Channels-last formats are the defaults:
      "NWC"/"NCW" for N=1, "NHWC"/"NCHW" for N=2, "NDHWC"/"NCDHW" for N=3.
    strides: Sequence of N positive integers, defaults to `[1] * N`.
    name: Name prefix to use.

  Returns:
    Rank N+2 tensor of type T of shape
    `[batch_size] + output_spatial_shape + [out_channels]`, where
    `output_spatial_shape` equals `input_spatial_shape` for "SAME" padding
    and `input_spatial_shape - filter_spatial_shape + 1` for "VALID".

  Raises:
    ValueError: if ranks are incompatible.
  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    # All shape validation and op selection happens in the helper; it is then
    # immediately applied to the concrete tensors.
    conv = _NonAtrousConvolution(
        input.get_shape(),
        filter_shape=filter.get_shape(),
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return conv(input, filter)
class _NonAtrousConvolution(object):
  """Helper class for _non_atrous_convolution.

  Precomputes the full strides list and selects the concrete convolution op
  (conv1d / conv2d / conv3d) at construction time so that `__call__` is a
  single dispatch.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments:
    input_shape: static input shape, i.e. input.get_shape().
    filter_shape: static filter shape, i.e. filter.get_shape().
    padding: see _non_atrous_convolution.
    data_format: see _non_atrous_convolution.
    strides: see _non_atrous_convolution.
    name: see _non_atrous_convolution.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,  # pylint: disable=redefined-builtin
      padding,
      data_format=None,
      strides=None,
      name=None):
    # Let input and filter ranks constrain each other: only one of the two
    # needs to be statically known.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    # Number of spatial dimensions N (rank minus batch and channel dims).
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    if conv_dims == 1:
      # conv1d uses the 2-d data format names
      # NOTE(review): the accepted set deliberately includes the 2-d names
      # "NCHW"/"NHWC", which is broader than the error message below states.
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      # conv1d takes a scalar stride rather than a strides list.
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Note that we need this adapter since argument names for conv1d don't match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    # Adapter: maps the conv2d/conv3d-style keyword names onto conv1d's.
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # Dispatches to the op selected in the constructor with the precomputed
    # strides/data_format.
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the
  default "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum correlation
  (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + dilations[1] * dy,
                            strides[2] * x + dilations[2] * dx,
                            c] +
                      filters[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filters` is equal to the
  negation of the erosion of `-input` by the reflected `filters`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # BUG FIX: this previously required data_format == "NCHW", which
  # contradicted the docstring above (only the default NHWC layout is
  # supported) and rejected every valid call.
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  # The v2 `dilations` argument maps onto the underlying op's `rates`.
  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  This has the effect of transforming sliding window operations into the
  corresponding "atrous" operation in which the input is sampled at the
  specified `dilation_rate`.

  In the special case that `dilation_rate` is uniformly 1, this simply returns:

    op(input, num_spatial_dims, padding)

  Otherwise, it returns:

    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID")
      adjusted_dilation_rate,
      adjusted_crops),

  where:

    adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
    adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]

  defined as follows:

  We first define two int64 tensors `paddings` and `crops` of shape
  `[num_spatial_dims, 2]` based on the value of `padding` and the spatial
  dimensions of the `input`:

  If `padding = "VALID"`, then:

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate)

  If `padding = "SAME"`, then:

    dilated_filter_shape =
      filter_shape + (filter_shape - 1) * (dilation_rate - 1)

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate,
      [(dilated_filter_shape - 1) // 2,
       dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])

  Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
  dimensions are contiguous starting at the second dimension, but the specified
  `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and
  `crops` in order to be usable with these operations.  For a given dimension,
  if the block size is 1, and both the starting and ending padding and crop
  amounts are 0, then space_to_batch_nd effectively leaves that dimension alone,
  which is what is needed for dimensions not part of `spatial_dims`.
  Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case
  efficiently for any number of leading and trailing dimensions.

  For 0 <= i < len(spatial_dims), we assign:

    adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
    adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
    adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]

  All unassigned values of `adjusted_dilation_rate` default to 1, while all
  unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.

  Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
  padding is equivalent to specifying `padding = "SAME"` with a filter_shape of
  `[1]*N`.

  Advanced usage. Note the following optimization: A sequence of
  `with_space_to_batch` operations with identical (not uniformly 1)
  `dilation_rate` parameters and "VALID" padding

    net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "VALID", op_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "VALID")
      ...
      result = op_k(result, num_spatial_dims, "VALID")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
  `batch_to_space_nd`.

  Similarly, a sequence of `with_space_to_batch` operations with identical (not
  uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
  dimensions

    net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "SAME")
      ...
      result = op_k(result, num_spatial_dims, "SAME")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  Args:
    input: Tensor of rank > max(spatial_dims).
    dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
    padding: str constant equal to "VALID" or "SAME"
    op: Function that maps (input, num_spatial_dims, padding) -> output
    filter_shape: If padding = "SAME", specifies the shape of the convolution
      kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims].
      If padding = "VALID", filter_shape is ignored and need not be specified.
    spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
      integers (which are >= 1) specifying the spatial dimensions of `input`
      and output. Defaults to: `range(1, num_spatial_dims+1)`.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    The output Tensor as described above, dimensions will vary based on the op
    provided.

  Raises:
    ValueError: if `padding` is invalid or the arguments are incompatible.
    ValueError: if `spatial_dims` are invalid.
  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
  input_shape = input.get_shape()

  def build_op(num_spatial_dims, padding):
    # Adapter: _WithSpaceToBatch expects build_op to return a two-argument
    # callable; the second argument (filter) is unused by `op`.
    return lambda inp, _: op(inp, num_spatial_dims, padding)

  # All of the work (validation, padding computation, op dispatch) happens in
  # the helper class; it is applied immediately with no filter tensor.
  new_op = _WithSpaceToBatch(
      input_shape,
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)
  return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see with_space_to_batch
    padding: see with_space_to_batch
    build_op: Function that maps (num_spatial_dims, paddings) -> (function that
      maps (input, filter) -> output).
    filter_shape: see with_space_to_batch
    spatial_dims: see with_space_to_batch
    data_format: see with_space_to_batch
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")
    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")
    num_spatial_dims = rate_shape.dims[0].value
    # Spatial dims start after the batch dim, and also after the channel dim
    # for channels-first ("NC*") formats.
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1
    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      # FIX: corrected typo "montonically" in the user-facing message.
      raise ValueError(
          "spatial_dims must be a monotonically increasing sequence of "
          "positive integers")
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1
    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))
    # Prefer the statically-known rate when available so paddings can be
    # computed with numpy at graph-construction time.
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # Uniform rate of 1: no space-to-batch transform is needed; call the
        # wrapped op directly with the caller's padding.
        self.call = build_op(num_spatial_dims, padding)
        return

    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape only known at runtime: defer base_paddings computation
        # to __call__ (see _with_space_to_batch_call).
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)

    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to a dynamic shape tensor for any unknown spatial dim.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops from [num_spatial_dims] indexing to
    # contiguous-dimension indexing expected by {space,batch}_to_*_nd.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)
    result = self.op(input_converted, filter)
    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)
    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)
    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    # `call` is either the direct op (uniform rate 1) or the
    # space-to-batch wrapper, chosen in the constructor.
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes base_paddings for the "SAME" -> "VALID" conversion.

  A filter of spatial size f sampled at rate r covers
  f + (f - 1) * (r - 1) == (f - 1) * r + 1 input positions, so
  (f - 1) * r positions of padding are required per spatial dimension.
  When that total is odd, the extra element is padded at the end, following
  the same convention as conv2d.
  """
  spatial_filter_shape = filter_shape[:num_spatial_dims]
  # Total per-dimension padding: dilated filter size minus one.
  pad_total = (spatial_filter_shape - 1) * rate_or_const_rate
  pad_begin = pad_total // 2
  pad_end = pad_total - pad_begin
  return array_ops.stack(
      [[pad_begin[d], pad_end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Produces a value of the same type as `orig` with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and every row not addressed by
  `spatial_dims` equals `fill_value`.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  trailing_dims = orig.get_shape().as_list()[1:]
  np_dtype = orig.dtype.as_numpy_dtype
  const_orig = tensor_util.constant_value(orig)
  source = orig if const_orig is None else const_orig
  pieces = []
  last_covered_dim = 0
  num_dims = len(spatial_dims)
  idx = 0
  while idx < num_dims:
    group_start = idx
    first_dim = spatial_dims[idx]
    if first_dim > 1:
      # Fill the gap between the previously covered dimension (or dimension 1
      # on the first group) and this one with `fill_value` rows.
      gap = first_dim - 1 - last_covered_dim
      pieces.append(np.full([gap] + trailing_dims, fill_value, dtype=np_dtype))
    # Advance `idx` to the end of this run of consecutive spatial dimensions,
    # so the matching rows of `orig` can be emitted as a single slice.
    while idx + 1 < num_dims and spatial_dims[idx + 1] == spatial_dims[idx] + 1:
      idx += 1
    pieces.append(source[group_start:idx + 1])
    last_covered_dim = spatial_dims[idx]
    idx += 1
  if const_orig is None:
    return array_ops.concat(pieces, 0)
  return np.concatenate(pieces)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None):
  # pylint: disable=line-too-long
  """Computes sums of N-D convolutions (actually cross-correlation).

  This also supports either output striding via the optional `strides` parameter
  or atrous convolution (also known as convolution with holes or dilated
  convolution, based on the French word "trous" meaning holes in English) via
  the optional `dilation_rate` parameter.  Currently, however, output striding
  is not supported for atrous convolutions.

  Specifically, in the case that `data_format` does not start with "NC", given
  a rank (N+2) `input` Tensor of shape

    [num_batches,
     input_spatial_shape[0],
     ...,
     input_spatial_shape[N-1],
     num_input_channels],

  a rank (N+2) `filter` Tensor of shape

    [spatial_filter_shape[0],
     ...,
     spatial_filter_shape[N-1],
     num_input_channels,
     num_output_channels],

  an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
  specifying the filter upsampling/input downsampling rate, and an optional list
  of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
  position (x[0], ..., x[N-1]):

  ```
  output[b, x[0], ..., x[N-1], k] =
      sum_{z[0], ..., z[N-1], q}
          filter[z[0], ..., z[N-1], q, k] *
          padded_input[b,
                       x[0]*strides[0] + dilation_rate[0]*z[0],
                       ...,
                       x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
                       q]
  ```
  where b is the index into the batch, k is the output channel number, q is the
  input channel number, and z is the N-D spatial offset within the filter. Here,
  `padded_input` is obtained by zero padding the input using an effective
  spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
  output striding `strides` as described in the
  [comment here](https://tensorflow.org/api_guides/python/nn#Convolution).

  In the case that `data_format` does start with `"NC"`, the `input` and output
  (but not the `filter`) are simply transposed as follows:

    convolution(input, data_format, **kwargs) =
      tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
                               **kwargs),
                   [0, N+1] + range(1, N+1))

  It is required that 1 <= N <= 3.

  Args:
    input: An (N+2)-D `Tensor` of type `T`, of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, in_channels] + input_spatial_shape` if data_format starts
      with "NC".
    filter: An (N+2)-D `Tensor` with the same type as `input` and shape
      `spatial_filter_shape + [in_channels, out_channels]`.
    padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
    strides: Optional.  Sequence of N ints >= 1.  Specifies the output stride.
      Defaults to [1]*N.  If any value of strides is > 1, then all values of
      dilation_rate must be 1.
    dilation_rate: Optional.  Sequence of N ints >= 1.  Specifies the filter
      upsampling/input downsampling rate.  In the literature, the same parameter
      is sometimes called `input stride` or `dilation`.  The effective filter
      size used for the convolution will be `spatial_filter_shape +
      (spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
      (dilation_rate[i]-1) zeros between consecutive elements of the original
      filter in each spatial dimension i.  If any value of dilation_rate is > 1,
      then all values of strides must be 1.
    name: Optional name for the returned tensor.
    data_format: A string or None.  Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC").  For N=1, the valid values are "NWC" (default) and
      "NCW".  For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    A `Tensor` with the same type as `input` of shape

        `[batch_size] + output_spatial_shape + [out_channels]`

    if data_format is None or does not start with "NC", or

        `[batch_size, out_channels] + output_spatial_shape`

    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of `padding`.

    If padding == "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])

    If padding == "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] -
              (spatial_filter_shape[i]-1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: If input/output depth does not match `filter` shape, if padding
      is other than `"VALID"` or `"SAME"`, or if data_format is invalid.

  """
  # pylint: enable=line-too-long
  with ops.name_scope(name, "convolution", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    filter_shape = filter.get_shape()
    # All validation and op selection is done by the Convolution helper; it is
    # then applied immediately to the concrete tensors.
    op = Convolution(
        input_shape,
        filter_shape,
        padding,
        strides=strides,
        dilation_rate=dilation_rate,
        name=name,
        data_format=data_format)
    return op(input, filter)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # Thin v2 shim: identical computation to `convolution`, with the v2
  # argument names (`filters`, `dilations`) mapped onto the v1 ones. The
  # docstring is installed separately from `convolution.__doc__`.
  return convolution(
      input,  # pylint: disable=redefined-builtin
      filters,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilations,
      name=name)
# Reuse `convolution`'s docstring for the v2 endpoint, rewriting the v1
# argument names (`dilation_rate`, `filter`) to the v2 ones
# (`dilations`, `filters`).
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
class Convolution(object):
  """Helper class for convolution.

  Validates shapes and data_format at construction time, then delegates the
  actual computation to a `_WithSpaceToBatch` wrapper around
  `_NonAtrousConvolution` (so dilation is implemented via
  space-to-batch / batch-to-space around a non-dilated convolution).

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # The total rank may come from either shape; at least one must be known.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")

    num_spatial_dims = num_total_dims - 2

    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))

    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))

    # Channel dim is last for channels-last formats, second for "NC*" ones;
    # spatial_dims index into the input shape accordingly.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)

    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    # Dilation is handled by _WithSpaceToBatch, which calls back into
    # _build_op for the inner non-dilated convolution.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    # Callback for _WithSpaceToBatch: builds the inner non-dilated conv op.
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    padding,
    dilation_rate=None,
    strides=None,
    name=None,
    data_format=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape
      `[batch_size] + input_spatial_shape + [num_channels]` if data_format does
      not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC". Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    padding: The padding algorithm, must be "SAME" or "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    dilation_rate: Optional. Dilation rate. List of N ints >= 1.
      Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
      of strides must be 1.
    strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
      If any value of strides is > 1, then all values of dilation_rate must be
      1.
    name: Optional. Name of the op.
    data_format: A string or None. Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC"). For N=1, the valid values are "NWC" (default) and
      "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
      For N=3, the valid values are "NDHWC" (default) and "NCDHW".

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]
    if data_format is None or does not start with "NC", or
      [batch_size, num_channels] + output_spatial_shape
    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:
    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    # Only 1-D, 2-D and 3-D pooling is supported.
    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
    input.get_shape().with_rank(num_spatial_dims + 2)
    # Normalized to numpy-array form (the elementwise comparisons below rely
    # on this).
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)
    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with SAME padding is not implemented for dilation_rate > 1")
    # Restriction inherited from the underlying kernels (see message).
    if np.any(strides > window_shape):
      raise ValueError(
          "strides > window_shape not supported due to inconsistency between "
          "CPU and GPU implementations")
    # Dispatch table keyed by (pooling_type, spatial rank).  1-D pooling is
    # served by the 2-D ops via a dummy spatial dimension (see `op` below).
    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
                                                              op_key[0]))
    # Expand window/strides to rank N+2 with 1s in the batch and channel
    # positions; their placement depends on the data format.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)
    if num_spatial_dims == 1:
      # Map the 1-D formats onto their 2-D counterparts for max_pool/avg_pool.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)
    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      # Invoked by with_space_to_batch with the transformed input and the
      # resolved padding.
      if num_spatial_dims == 1:
        # Insert a dummy spatial dimension so the 2-D op can be used.
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        # Remove the dummy dimension added above.
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result
    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  In the case that `data_format` does not start with "NC", computes for
      0 <= b < batch_size,
      0 <= x[i] < output_spatial_shape[i],
      0 <= c < num_channels:

  ```
    output[b, x[0], ..., x[N-1], c] =
      REDUCE_{z[0], ..., z[N-1]}
        input[b,
              x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
              ...
              x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
              c],
  ```

  where the reduction function REDUCE depends on the value of `pooling_type`,
  and pad_before is defined based on the value of `padding` as described in
  the "returns" section of `tf.nn.convolution` for details.
  The reduction never includes out-of-bounds positions.

  In the case that `data_format` starts with `"NC"`, the `input` and output are
  simply transposed as follows:

  ```
    pool(input, data_format, **kwargs) =
      tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
                        **kwargs),
                   [0, N+1] + range(1, N+1))
  ```

  Args:
    input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
      [num_channels]` if data_format does not start with "NC" (default), or
      `[batch_size, num_channels] + input_spatial_shape` if data_format starts
      with "NC". Pooling happens over the spatial dimensions only.
    window_shape: Sequence of N ints >= 1.
    pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
    strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
      strides is > 1, then all values of `dilations` must be 1.
    padding: The padding algorithm, must be "SAME" or "VALID". Defaults to
      "VALID".
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string or None. Specifies whether the channel dimension of
      the `input` and output is the last dimension (default, or if `data_format`
      does not start with "NC"), or the second dimension (if `data_format`
      starts with "NC"). For N=1, the valid values are "NWC" (default) and
      "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
      N=3, the valid values are "NDHWC" (default) and "NCDHW".
    dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
      [1]*N. If any value of `dilations` is > 1, then all values of strides
      must be 1.
    name: Optional. Name of the op.

  Returns:
    Tensor of rank N+2, of shape
      [batch_size] + output_spatial_shape + [num_channels]
    if data_format is None or does not start with "NC", or
      [batch_size, num_channels] + output_spatial_shape
    if data_format starts with "NC",
    where `output_spatial_shape` depends on the value of padding:
    If padding = "SAME":
      output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
    If padding = "VALID":
      output_spatial_shape[i] =
        ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
             / strides[i]).

  Raises:
    ValueError: if arguments are invalid.
  """
  # Thin v2 wrapper: only the argument order/names differ from the v1 `pool`
  # (`dilations` maps to v1's `dilation_rate`).
  return pool(
      input=input,
      window_shape=window_shape,
      pooling_type=pooling_type,
      padding=padding,
      dilation_rate=dilations,
      strides=strides,
      name=name,
      data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  This function is a simpler wrapper around the more general
  `tf.nn.convolution`, and exists only for backwards compatibility. You can
  use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.

  Computes a 2-D atrous convolution, also known as convolution with holes or
  dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
  parameter is equal to one, it performs regular 2-D convolution. If the `rate`
  parameter is greater than one, it performs convolution with holes, sampling
  the input values every `rate` pixels in the `height` and `width` dimensions.
  This is equivalent to convolving the input with a set of upsampled filters,
  produced by inserting `rate - 1` zeros between two consecutive values of the
  filters along the `height` and `width` dimensions, hence the name atrous
  convolution or convolution with holes (the French word trous means holes in
  English).

  More specifically:

  ```
  output[batch, height, width, out_channel] =
      sum_{dheight, dwidth, in_channel} (
          filters[dheight, dwidth, in_channel, out_channel] *
          value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
      )
  ```

  Atrous convolution allows us to explicitly control how densely to compute
  feature responses in fully convolutional networks. Used in conjunction with
  bilinear interpolation, it offers an alternative to `conv2d_transpose` in
  dense prediction tasks such as semantic image segmentation, optical flow
  computation, or depth estimation. It also allows us to effectively enlarge
  the field of view of filters without increasing the number of parameters or
  the amount of computation.

  For a description of atrous convolution and how it can be used for dense
  feature extraction, please see: [Semantic Image Segmentation with Deep
  Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
  The same operation is investigated further in [Multi-Scale Context Aggregation
  by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
  that effectively use atrous convolution in different ways are, among others,
  [OverFeat: Integrated Recognition, Localization and Detection using
  Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
  Scanning with Deep Max-Pooling Convolutional Neural
  Networks](http://arxiv.org/abs/1302.1700).
  Atrous convolution is also closely related to the so-called noble identities
  in multi-rate signal processing.

  There are many different ways to implement atrous convolution (see the refs
  above). The implementation here reduces

  ```python
  atrous_conv2d(value, filters, rate, padding=padding)
  ```

  to the following three operations:

  ```python
  paddings = ...
  net = space_to_batch(value, paddings, block_size=rate)
  net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
  crops = ...
  net = batch_to_space(net, crops, block_size=rate)
  ```

  Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
  operations with identical `rate` parameters, 'SAME' `padding`, and filters
  with odd heights/ widths:

  ```python
  net = atrous_conv2d(net, filters1, rate, padding="SAME")
  net = atrous_conv2d(net, filters2, rate, padding="SAME")
  ...
  net = atrous_conv2d(net, filtersK, rate, padding="SAME")
  ```

  can be equivalently performed cheaper in terms of computation and memory as:

  ```python
  pad = ...  # padding so that the input dims are multiples of rate
  net = space_to_batch(net, paddings=pad, block_size=rate)
  net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
  net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
  ...
  net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
  net = batch_to_space(net, crops=pad, block_size=rate)
  ```

  because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
  the same `block_size` cancel out when their respective `paddings` and `crops`
  inputs are identical.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
      format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution is
      equivalent to standard convolution with upsampled filters with effective
      height `filter_height + (filter_height - 1) * (rate - 1)` and effective
      width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros along consecutive elements across the
      `filters`' spatial dimensions.
    rate: A positive int32. The stride with which we sample input values across
      the `height` and `width` dimensions. Equivalently, the rate by which we
      upsample the filter values by inserting zeros across the `height` and
      `width` dimensions. In the literature, the same parameter is sometimes
      called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.
    Output shape with `'VALID'` padding is:
        [batch, height - rate * (filter_height - 1),
         width - rate * (filter_width - 1), out_channels].
    Output shape with `'SAME'` padding is:
        [batch, height, width, out_channels].

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  # Broadcast the scalar rate to a per-spatial-dimension (height, width)
  # dilation rate for the general convolution helper.
  return convolution(
      input=value,
      filter=filters,
      padding=padding,
      dilation_rate=np.broadcast_to(rate, (2,)),
      name=name)
def _convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NHWC",
              dilations=None,
              name=None):
  # pylint: disable=line-too-long
  r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                          filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filters: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: A list of `ints`.
      1-D tensor of length 4. The stride of the sliding window for each
      dimension of `input`. The dimension order is determined by the value of
      `data_format`, see below for details.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # pylint: enable=line-too-long
  if dilations is None:
    dilations = [1, 1, 1, 1]
  # The v2 API drops `use_cudnn_on_gpu`; it is always enabled here.
  return conv2d(input,  # pylint: disable=redefined-builtin
                filters,
                strides,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                dilations=dilations,
                name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
  and a filter / kernel tensor of shape
  `[filter_height, filter_width, in_channels, out_channels]`, this op
  performs the following:

  1. Flattens the filter to a 2-D matrix with shape
     `[filter_height * filter_width * in_channels, output_channels]`.
  2. Extracts image patches from the input tensor to form a *virtual*
     tensor of shape `[batch, out_height, out_width,
     filter_height * filter_width * in_channels]`.
  3. For each patch, right-multiplies the filter matrix and the image patch
     vector.

  In detail, with the default NHWC format,

      output[b, i, j, k] =
          sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
                          * filter[di, dj, q, k]

  Must have `strides[0] = strides[3] = 1`. For the most common case of the same
  horizontal and vertical strides, `strides = [1, stride, stride, 1]`.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      A 4-D tensor. The dimension order is interpreted according to the value
      of `data_format`, see below for details.
    filter: A `Tensor`. Must have the same type as `input`.
      A 4-D tensor of shape
      `[filter_height, filter_width, in_channels, out_channels]`
    strides: A list of `ints`.
      1-D tensor of length 4. The stride of the sliding window for each
      dimension of `input`. The dimension order is determined by the value of
      `data_format`, see below for details.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, height, width, channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by the
      value of `data_format`, see above for details. Dilations in the batch and
      depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # A list-valued `padding` is flattened into the `explicit_paddings` attr
  # and `padding` itself becomes "EXPLICIT"; string paddings pass through.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d(input,  # pylint: disable=redefined-builtin
                           filter,
                           strides,
                           padding,
                           use_cudnn_on_gpu=use_cudnn_on_gpu,
                           explicit_paddings=explicit_paddings,
                           data_format=data_format,
                           dilations=dilations,
                           name=name)
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input,  # pylint: disable=redefined-builtin
                              filter_sizes,
                              out_backprop,
                              strides,
                              padding,
                              data_format="NHWC",
                              dilations=None,
                              name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  if dilations is None:
    dilations = [1, 1, 1, 1]
  # The v2 API drops `use_cudnn_on_gpu`; it is always enabled here.
  return conv2d_backprop_filter(input,  # pylint: disable=redefined-builtin
                                filter_sizes,
                                out_backprop,
                                strides,
                                padding,
                                use_cudnn_on_gpu=True,
                                data_format=data_format,
                                dilations=dilations,
                                name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`.
      An integer vector representing the tensor shape of `filter`,
      where `filter` is a 4-D
      `[filter_height, filter_width, in_channels, out_channels]` tensor.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Resolve list-valued padding into ("EXPLICIT", flattened) for the kernel.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(
      input, filter_sizes, out_backprop, strides, padding, use_cudnn_on_gpu,
      explicit_paddings, data_format, dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
                             filters,
                             out_backprop,
                             strides,
                             padding,
                             data_format="NHWC",
                             dilations=None,
                             name=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`,
      where `input` is a 4-D `[batch, height, width, channels]` tensor.
    filters: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filters`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filters`.
  """
  if dilations is None:
    dilations = [1, 1, 1, 1]
  # The v2 API drops `use_cudnn_on_gpu`; it is always enabled here.
  return conv2d_backprop_input(input_sizes,
                               filters,
                               out_backprop,
                               strides,
                               padding,
                               use_cudnn_on_gpu=True,
                               data_format=data_format,
                               dilations=dilations,
                               name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`.
      An integer vector representing the shape of `input`,
      where `input` is a 4-D `[batch, height, width, channels]` tensor.
    filter: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`.
      The stride of the sliding window for each dimension of the input
      of the convolution. Must be in the same order as the dimension specified
      with format.
    padding: Either the string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list indicating the explicit paddings at
      the start and end of each dimension. When explicit padding is used and
      data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
      pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
      and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
      [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`.
      Defaults to `"NHWC"`.
      Specify the data format of the input and output data. With the
      default format "NHWC", the data is stored in the order of:
          [batch, in_height, in_width, in_channels].
      Alternatively, the format could be "NCHW", the data storage order of:
          [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4. The dilation factor for each dimension of
      `input`. If set to k > 1, there will be k-1 skipped cells between each
      filter element on that dimension. The dimension order is determined by
      the value of `data_format`, see above for details. Dilations in the batch
      and depth dimensions must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # Resolve list-valued padding into ("EXPLICIT", flattened) for the kernel.
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(
      input_sizes, filter, out_backprop, strides, padding, use_cudnn_on_gpu,
      explicit_paddings, data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NHWC",
name=None):
"""The transpose of `conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv2d_transpose",
[value, filter, output_shape]) as name:
if data_format not in ("NCHW", "NHWC"):
raise ValueError("data_format has to be either NCHW or NHWC.")
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 3 if data_format == "NHWC" else 1
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[3]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[3]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filter.get_shape().dims[2].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[2]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv2d_backprop_input(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,
    filters,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    name=None):
  # The v2 endpoint only renames `value` -> `input` and `filter` -> `filters`;
  # all real work happens in the v1 implementation.
  return conv2d_transpose(
      value=input,
      filter=filters,
      output_shape=output_shape,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Reuse the v1 docstring, rewritten for the renamed arguments.
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv2d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
filters,
output_shape,
rate,
padding,
name=None):
"""The transpose of `atrous_conv2d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `atrous_conv2d` rather than an actual
deconvolution.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, out_channels, in_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
output_shape: A 1-D `Tensor` of shape representing the output shape of the
deconvolution op.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
than one, or if the output_shape is not a tensor with 4 elements.
"""
with ops.name_scope(name, "atrous_conv2d_transpose",
[value, filters, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filters = ops.convert_to_tensor(filters, name="filters")
if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
raise ValueError(
"value's input channels does not match filters' input channels, "
"{} != {}".format(value.get_shape()[3],
filters.get_shape()[3]))
if rate < 1:
raise ValueError("rate {} cannot be less than one".format(rate))
if rate == 1:
return conv2d_transpose(
value,
filters,
output_shape,
strides=[1, 1, 1, 1],
padding=padding,
data_format="NHWC")
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
raise ValueError("output_shape must have shape (4,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [4] if reached this point.
if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[3],
filters.get_shape()[2]))
# We have two padding contributions. The first is used for converting "SAME"
# to "VALID". The second is required so that the height and width of the
# zero-padded value tensor are multiples of rate.
# Padding required to reduce to "VALID" convolution
if padding == "SAME":
# Handle filters whose shape is unknown during graph creation.
if filters.get_shape().is_fully_defined():
filter_shape = filters.get_shape().as_list()
else:
filter_shape = array_ops.shape(filters)
filter_height, filter_width = filter_shape[0], filter_shape[1]
# Spatial dimensions of the filters and the upsampled filters in which we
# introduce (rate - 1) zeros between consecutive filter values.
filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
pad_height = filter_height_up - 1
pad_width = filter_width_up - 1
# When pad_height (pad_width) is odd, we pad more to bottom (right),
# following the same convention as conv2d().
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
elif padding == "VALID":
pad_top = 0
pad_bottom = 0
pad_left = 0
pad_right = 0
else:
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
in_height = output_shape[1] + pad_top + pad_bottom
in_width = output_shape[2] + pad_left + pad_right
# More padding so that rate divides the height and width of the input.
pad_bottom_extra = (rate - in_height % rate) % rate
pad_right_extra = (rate - in_width % rate) % rate
# The paddings argument to space_to_batch is just the extra padding
# component.
space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
value = array_ops.space_to_batch(
input=value, paddings=space_to_batch_pad, block_size=rate)
input_sizes = [
rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
(in_width + pad_right_extra) // rate, output_shape[3]
]
value = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=value,
strides=[1, 1, 1, 1],
padding="VALID",
data_format="NHWC")
# The crops argument to batch_to_space includes both padding components.
batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
[pad_left, pad_right + pad_right_extra]]
return array_ops.batch_to_space(
input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input, # pylint: disable=redefined-builtin,missing-docstring
filters,
strides,
padding,
data_format="NDHWC",
dilations=None,
name=None):
if dilations is None:
dilations = [1, 1, 1, 1, 1]
return gen_nn_ops.conv3d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format=data_format,
dilations=dilations,
name=name)
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
value,
filter, # pylint: disable=redefined-builtin
output_shape,
strides,
padding="SAME",
data_format="NDHWC",
name=None):
"""The transpose of `conv3d`.
This operation is sometimes called "deconvolution" after [Deconvolutional
Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
actually the transpose (gradient) of `conv3d` rather than an actual
deconvolution.
Args:
value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
with ops.name_scope(name, "conv3d_transpose",
[value, filter, output_shape]) as name:
value = ops.convert_to_tensor(value, name="value")
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
axis = 1 if data_format == "NCDHW" else 4
if not value.get_shape().dims[axis].is_compatible_with(
filter.get_shape()[4]):
raise ValueError("input channels does not match filter's input channels, "
"{} != {}".format(value.get_shape()[axis],
filter.get_shape()[4]))
output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
raise ValueError("output_shape must have shape (5,), got {}".format(
output_shape_.get_shape()))
if isinstance(output_shape, (list, np.ndarray)):
# output_shape's shape should be == [5] if reached this point.
if not filter.get_shape().dims[3].is_compatible_with(
output_shape[axis]):
raise ValueError(
"output_shape does not match filter's output channels, "
"{} != {}".format(output_shape[axis],
filter.get_shape()[3]))
if padding != "VALID" and padding != "SAME":
raise ValueError("padding must be either VALID or SAME:"
" {}".format(padding))
return gen_nn_ops.conv3d_backprop_input_v2(
input_sizes=output_shape_,
filter=filter,
out_backprop=value,
strides=strides,
padding=padding,
data_format=data_format,
name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
    input,
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  # The v2 endpoint only renames `value` -> `input` and `filter` -> `filters`;
  # all real work happens in the v1 implementation.
  return conv3d_transpose(
      value=input,
      filter=filters,
      output_shape=output_shape,
      strides=strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Reuse the v1 docstring, rewritten for the renamed arguments.
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv3d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
"""Adds `bias` to `value`.
This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the
case where both types are quantized.
Args:
value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
if not context.executing_eagerly():
value = ops.convert_to_tensor(value, name="input")
bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value`.

  This is a deprecated version of bias_add and will soon be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D. Broadcasting is supported, so `value` may have any number of
  dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ from
  `value` in the case where both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    input_tensor = ops.convert_to_tensor(value, name="input")
    bias_tensor = ops.convert_to_tensor(
        bias, dtype=input_tensor.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(input_tensor, bias_tensor, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
"""Computes Concatenated ReLU.
Concatenates a ReLU which selects only the positive part of the activation
with a ReLU which selects only the *negative* part of the activation.
Note that as a result this non-linearity doubles the depth of the activations.
Source: [Understanding and Improving Convolutional Neural Networks via
Concatenated Rectified Linear Units. W. Shang, et
al.](https://arxiv.org/abs/1603.05201)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
axis: The axis that the output values are concatenated along. Default is -1.
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
c = array_ops.concat([features, -features], axis, name=name)
return gen_nn_ops.relu(c)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
return crelu(features, name=name, axis=axis)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)
Args:
features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
`int16`, or `int8`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
return gen_nn_ops.relu6(features, name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
"""Compute the Leaky ReLU activation function.
"Rectifier Nonlinearities Improve Neural Network Acoustic Models"
AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf
Args:
features: A `Tensor` representing preactivation values. Must be one of
the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
alpha: Slope of the activation function at x < 0.
name: A name for the operation (optional).
Returns:
The activation value.
"""
with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
features = ops.convert_to_tensor(features, name="features")
if features.dtype.is_integer:
features = math_ops.to_float(features)
if compat.forward_compatible(2018, 11, 1):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions and keep its last dimension."""
  rank = array_ops.rank(logits)
  # Dynamic size of the last dimension, as a length-1 vector.
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
  # In graph mode, propagate a static shape when every outer dim is known.
  if not context.executing_eagerly():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      outer_dims = dims[:-1]
      if all(d is not None for d in outer_dims):
        flat_rows = 1
        for d in outer_dims:
          flat_rows *= d
        output.set_shape([flat_rows, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then
  invokes the tf.nn._softmax or tf.nn._log_softmax function. The output would
  be transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)
  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # Use equality, not identity (`is`), to compare with the literal -1:
  # `dim is -1` relies on CPython's small-integer caching and raises a
  # SyntaxWarning on Python >= 3.8.
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))
  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.
  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
  # Do the actual softmax on its last dimension.
  output = compute_op(logits)
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)
  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
"""Computes softmax activations.
This function performs the equivalent of
softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type and shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.softmax, axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for `axis`.
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
"""Computes log softmax activations.
For each batch `i` and class `j` we have
logsoftmax = logits - log(reduce_sum(exp(logits), axis))
Args:
logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
axis: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
dimension of `logits`.
"""
if axis is None:
axis = -1
return _softmax(logits, gen_nn_ops.log_softmax, axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
return softmax_cross_entropy_with_logits_v2_helper(
labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
labels, logits, axis=None, name=None, dim=None):
"""Computes softmax cross entropy between `logits` and `labels`.
Measures the probability error in discrete classification tasks in which the
classes are mutually exclusive (each entry is in exactly one class). For
example, each CIFAR-10 image is labeled with one and only one label: an image
can be a dog or a truck, but not both.
**NOTE:** While the classes are mutually exclusive, their probabilities
need not be. All that is required is that each row of `labels` is
a valid probability distribution. If they are not, the computation of the
gradient will be incorrect.
If using exclusive `labels` (wherein one and only
one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.
**WARNING:** This op expects unscaled logits, since it performs a `softmax`
on `logits` internally for efficiency. Do not call this op with the
output of `softmax`, as it will produce incorrect results.
A common use case is to have logits and labels of shape
`[batch_size, num_classes]`, but higher dimensions are supported, with
the `axis` argument specifying the class dimension.
`logits` and `labels` must have the same dtype (either `float16`, `float32`,
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
Args:
labels: Each vector along the class dimension should hold a valid
probability distribution e.g. for the case in which labels are of shape
`[batch_size, num_classes]`, each row of `labels[i]` must be a valid
probability distribution.
logits: Unscaled log probabilities.
axis: The class dimension. Defaulted to -1 which is the last dimension.
name: A name for the operation (optional).
dim: Deprecated alias for axis.
Returns:
A `Tensor` that contains the softmax cross entropy loss. Its type is the
same as `logits` and its shape is the same as `labels` except that it does
not have the last dimension of `labels`.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
# could break users who call this with bad labels, but disregard the bad
# results.
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
del dim
if axis is None:
axis = -1
with ops.name_scope(name, "softmax_cross_entropy_with_logits",
[logits, labels]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
labels = ops.convert_to_tensor(labels, name="labels")
convert_to_float32 = (
logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
precise_logits = math_ops.cast(
logits, dtypes.float32) if convert_to_float32 else logits
# labels and logits must be of the same type
labels = math_ops.cast(labels, precise_logits.dtype)
input_rank = array_ops.rank(precise_logits)
# For shape inference.
shape = logits.get_shape()
# Move the dim to the end if dim is not the last dimension.
if axis != -1:
def _move_dim_to_end(tensor, dim_index, rank):
return array_ops.transpose(
tensor,
array_ops.concat([
math_ops.range(dim_index),
math_ops.range(dim_index + 1, rank), [dim_index]
], 0))
precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
labels = _move_dim_to_end(labels, axis, input_rank)
input_shape = array_ops.shape(precise_logits)
# Make precise_logits and labels into matrices.
precise_logits = _flatten_outer_dims(precise_logits)
labels = _flatten_outer_dims(labels)
# Do the actual op computation.
# The second output tensor contains the gradients. We use it in
# _CrossEntropyGrad() in nn_grad but not here.
cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
precise_logits, labels, name=name)
# The output cost shape should be the input minus axis.
output_shape = array_ops.slice(input_shape, [0],
[math_ops.subtract(input_rank, 1)])
cost = array_ops.reshape(cost, output_shape)
# Make shape inference work since reshape and transpose may erase its static
# shape.
if not context.executing_eagerly(
) and shape is not None and shape.dims is not None:
shape = shape.as_list()
del shape[axis]
cost.set_shape(shape)
if convert_to_float32:
return math_ops.cast(cost, logits.dtype)
else:
return cost
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks where the
  classes are mutually exclusive (each entry belongs to exactly one class).
  For example, each CIFAR-10 image carries one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is a valid
  probability distribution. If not, the gradient computation will be wrong.

  If using exclusive `labels` (one and only one class true at a time), see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the output
  of `softmax`, as it will produce incorrect results.

  A common use case is logits and labels of shape `[batch_size, num_classes]`,
  but higher dimensions are supported, with the `dim` argument specifying the
  class dimension.

  Backpropagation will happen only into `logits`. To calculate a cross entropy
  loss that allows backpropagation into both `logits` and `labels`, see
  `tf.nn.softmax_cross_entropy_with_logits_v2`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution, e.g. for labels of shape
      `[batch_size, num_classes]`, each row `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    dim: The class dimension. Defaulted to -1, which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # Freeze `labels` so no gradient flows into them (the documented v1
    # behavior), then delegate to the v2 implementation.
    frozen_labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
    return softmax_cross_entropy_with_logits_v2(
        labels=frozen_labels, logits=logits, axis=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)

  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.

  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # Upcast float16 logits to float32 for the kernel computation; the final
    # cost is cast back to float16 before returning.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits

    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    # Graph-construction-time validation, performed only where static shape
    # information is available.
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    if logits.get_shape().ndims == 2:
      # Fast path: inputs are already rank 2 / rank 1, call the kernel directly.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost

    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      # Restore the caller-visible label shape, both dynamically and (where
      # known) statically for shape inference.
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list or tuple of 4 ints. The size of the window for each dimension
      of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    # Accept anything convertible to a tensor (numpy arrays, lists, ...).
    input_tensor = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        input_tensor,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: A list or tuple of 4 ints. The size of the window for each dimension
      of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool", [value]) as name:
    # Accept anything convertible to a tensor (numpy arrays, lists, ...).
    input_tensor = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.max_pool(
        input_tensor,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index
  `((b * height + y) * width + x) * channels + c`.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct answer
  is outside (either negative or too large). This is a bug, but fixing it is
  difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`. Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
      Defaults to `tf.int64`. The dtype of the returned argmax tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.
  """
  # The underlying kernel only implements NHWC; reject anything else early.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=output_dtype,
      name=name)
# pylint: enable=redefined-builtin
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  kernel_in_depth = int(kernel_shape[2])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # One multiply plus one add per filter tap and input channel for every
  # output element, hence the factor of 2.
  return ops.OpStats(
      "flops",
      (out_count * kernel_in_depth * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # Depthwise: one multiply-add per filter tap per output element (no
  # reduction over input depth), hence no in-depth factor here.
  return ops.OpStats("flops", (out_count * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd.

  Args:
    graph: Graph containing the node.
    node: NodeDef of the BiasAdd op whose cost is being estimated.

  Returns:
    An `ops.OpStats` recording one flop (one add) per input element.
  """
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  # Accumulate in int64, matching _calc_conv_flops/_calc_depthwise_conv_flops;
  # the platform-default int np.prod would otherwise use can overflow on very
  # large shapes.
  input_count = np.prod(input_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # bias_add broadcasts `biases` across the batch dimension of the product.
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Same as xw_plus_b but routes through the v1 bias_add implementation.
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout `noise_shape` against the shape of `x`.

  Best-effort static resolution: unknown dimensions in `noise_shape` are
  filled in from the statically-known dimensions of `x` when the ranks match.
  If `noise_shape` cannot be interpreted statically it is returned unchanged
  and validation is left to the downstream op (which surfaces errors
  immediately in eager mode).
  """
  # With no noise_shape given, drop decisions are made per element of x.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    static_noise = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not statically interpretable (e.g. a dynamic tensor); pass through.
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is not None and len(x_dims) == len(static_noise.dims):
    # Borrow known dimensions of x for the unknown entries of noise_shape.
    merged = [
        x_dim.value
        if (n_dim.value is None and x_dim.value is not None) else n_dim.value
        for x_dim, n_dim in zip(x_dims, static_noise.dims)
    ]
    return tensor_shape.TensorShape(merged)

  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):  # pylint: disable=invalid-name
  """Computes dropout.

  For each element of `x`, with probability `rate`, outputs `0`, and otherwise
  scales up the input by `1 / (1-rate)`. The scaling is such that the expected
  sum is unchanged.

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`. The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  # Translate the deprecated keep_prob into the equivalent drop rate; a
  # non-numeric keep_prob (e.g. a string) surfaces as a ValueError.
  if keep_prob is None:
    keep = None
  else:
    try:
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")

  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):  # pylint: disable=invalid-name
  """Computes dropout.

  With probability `rate`, drops elements of `x`. Input that are kept are
  scaled up by `1 / (1 - rate)`, otherwise outputs `0`.  The scaling is so that
  the expected sum is unchanged.

  By default, each element is kept or dropped independently.  If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions.  For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed`
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)

    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      # In eager mode a concrete EagerTensor rate can be inspected directly;
      # a Python-number rate was already handled above.
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      # In graph mode, convert rate once so downstream arithmetic uses x's
      # dtype, and statically fold the rate == 0 case when it is constant.
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())

      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x

    noise_shape = _get_noise_shape(x, noise_shape)
    # Sample a uniform distribution on [0.0, 1.0) and select values larger than
    # rate.
    #
    # NOTE: Random uniform actually can only generate 2^23 floats on [1.0, 2.0)
    # and subtract 1.0.
    random_tensor = random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    keep_prob = 1 - rate
    scale = 1 / keep_prob
    # NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that
    # float to be selected, hence we use a >= comparison.
    keep_mask = random_tensor >= rate
    ret = x * scale * math_ops.cast(keep_mask, x.dtype)
    if not context.executing_eagerly():
      # Reshape/broadcast above may have erased the static shape; restore it.
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  If the input is a vector (rank=1), finds the `k` largest entries in the vector
  and outputs their values and indices as vectors.  Thus `values[j]` is the
  `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension).  Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the last
      dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # Thin wrapper over the generated TopKV2 kernel, which returns both the
  # values and their indices in one call.
  return gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th order statistic for the last dimension.

  If the input is a vector (rank-1), finds the entries which is the nth-smallest
  value in the vector and outputs their values as scalar tensor.

  For matrices (resp. higher rank input), computes the entries which is the
  nth-smallest value in each row (resp. vector along the last dimension). Thus,

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[:-1])`
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  # Thin wrapper over the generated NthElement kernel.
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated.  Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  This is a deprecated version of `fractional_max_pool`.

  Fractional max pooling is slightly different than regular max pooling.  In
  regular max pooling, you downsize an input set by taking the maximum value of
  smaller N x N subsections of the set (often 2x2), and try to reduce the set by
  a factor of N, where N is an integer.  Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.

  The sizes of the pooling regions are generated randomly but are fairly
  uniform.  For example, let's look at the height dimension, and the constraints
  on the list of rows that will be pool boundaries.

  First we define the following:

  1.  input_row_length : the number of rows from the input set
  2.  output_row_length : which will be smaller than the input
  3.  alpha = input_row_length / output_row_length : our reduction ratio
  4.  K = floor(alpha)
  5.  row_pooling_sequence : this is the result list of pool boundary rows

  Then, row_pooling_sequence should satisfy:

  1.  a[0] = 0 : the first value of the sequence is 0
  2.  a[end] = input_row_length : the last value of the sequence is the size
  3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4.  length(row_pooling_sequence) = output_row_length+1

  For more details on fractional max pooling, see this paper: [Benjamin Graham,
  Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio for
      each dimension of `value`, currently only supports row and col dimension
      and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
      1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
      allow pooling on batch and channels dimensions.  1.44 and 1.73 are pooling
      ratio on height and width dimensions respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`.  Defaults to `False`.  When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice.  The result would be [20, 16] for fractional max pooling.
    deterministic: An optional `bool`.  Deprecated; use `fractional_max_pool_v2`
      instead.
    seed: An optional `int`.  Defaults to `0`.  If set to be non-zero, the
      random number generator is seeded by the given seed.  Otherwise it is
      seeded by a random seed.
    seed2: An optional `int`.  Deprecated; use `fractional_max_pool_v2` instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Pass `name` by keyword: previously it was passed positionally, which was
  # inconsistent with `fractional_avg_pool` below and fragile against any
  # change in the generated op's signature.
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name=name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Fractional max pooling is slightly different than regular max pooling.  In
  regular max pooling, you downsize an input set by taking the maximum value of
  smaller N x N subsections of the set (often 2x2), and try to reduce the set by
  a factor of N, where N is an integer.  Fractional max pooling, as you might
  expect from the word "fractional", means that the overall reduction ratio N
  does not have to be an integer.

  The sizes of the pooling regions are generated randomly but are fairly
  uniform.  For example, let's look at the height dimension, and the constraints
  on the list of rows that will be pool boundaries.

  First we define the following:

  1.  input_row_length : the number of rows from the input set
  2.  output_row_length : which will be smaller than the input
  3.  alpha = input_row_length / output_row_length : our reduction ratio
  4.  K = floor(alpha)
  5.  row_pooling_sequence : this is the result list of pool boundary rows

  Then, row_pooling_sequence should satisfy:

  1.  a[0] = 0 : the first value of the sequence is 0
  2.  a[end] = input_row_length : the last value of the sequence is the size
  3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
  4.  length(row_pooling_sequence) = output_row_length+1

  For more details on fractional max pooling, see this paper: [Benjamin Graham,
  Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio for
      each dimension of `value`, currently only supports row and col dimension
      and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
      1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
      allow pooling on batch and channels dimensions.  1.44 and 1.73 are pooling
      ratio on height and width dimensions respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`.  Defaults to `False`.  When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice.  The result would be [20, 16] for fractional max pooling.
    seed: An optional `int`.  Defaults to `0`.  If set to be non-zero, the
      random number generator is seeded by the given seed.  Otherwise it is
      seeded by a random seed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional max pooling.  Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  if seed == 0:
    # seed == 0 requests non-deterministic region generation from the op.
    deterministic = False
    seed1 = 0
    seed2 = 0
  else:
    # Expand the user seed into a (graph-level, op-level) seed pair so the
    # pooling regions are reproducible.
    seed1, seed2 = random_seed.get_seed(seed)
    deterministic = True
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated.  Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  This is a deprecated version of `fractional_avg_pool`.

  Fractional average pooling is similar to Fractional max pooling in the pooling
  region generation step.  The only difference is that after pooling regions are
  generated, a mean operation is performed instead of a max operation in each
  pooling region.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio for
      each dimension of `value`, currently only supports row and col dimension
      and should be >= 1.0. For example, a valid pooling ratio looks like [1.0,
      1.44, 1.73, 1.0]. The first and last elements must be 1.0 because we don't
      allow pooling on batch and channels dimensions.  1.44 and 1.73 are pooling
      ratio on height and width dimensions respectively.
    pseudo_random: An optional `bool`.  Defaults to `False`. When set to `True`,
      generates the pooling sequence in a pseudorandom fashion, otherwise, in a
      random fashion. Check paper [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for difference between
      pseudorandom and random.
    overlapping: An optional `bool`.  Defaults to `False`.  When set to `True`,
      it means when pooling, the values at the boundary of adjacent pooling
      cells are used by both cells. For example:
      `index  0  1  2  3  4`
      `value  20 5  16 3  7`
      If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used
      twice.  The result would be [20, 16] for fractional avg pooling.
    deterministic: An optional `bool`.  Deprecated; use `fractional_avg_pool_v2`
      instead.
    seed: An optional `int`.  Defaults to `0`.  If set to be non-zero, the
      random number generator is seeded by the given seed.  Otherwise it is
      seeded by a random seed.
    seed2: An optional `int`.  Deprecated; use `fractional_avg_pool_v2` instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling.  Has the same type as
      `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # Forward everything beyond the positional tensor args by keyword for
  # clarity; the deprecated seed2/deterministic knobs pass straight through.
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed, seed2=seed2, name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Fractional average pooling shares the pooling-region generation step with
  fractional max pooling; the only difference is that each pooling region is
  reduced with a mean instead of a max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4.  Pooling ratio
      for each dimension of `value`; only the row and col dimensions are
      currently supported, so the first and last elements must be 1.0 (no
      pooling on the batch and channels dimensions).  For example,
      [1.0, 1.44, 1.73, 1.0] pools by 1.44 on height and by 1.73 on width.
    pseudo_random: An optional `bool`.  Defaults to `False`.  When `True`,
      the pooling sequence is generated in a pseudorandom rather than random
      fashion; see [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071) for the difference.
    overlapping: An optional `bool`.  Defaults to `False`.  When `True`,
      values at the boundary of adjacent pooling cells are used by both
      cells.  For example, for `index 0 1 2 3 4`, `value 20 5 16 3 7` and
      pooling sequence [0, 2, 4], the value 16 at index 2 is used twice and
      the result is [20, 16].
    seed: An optional `int`.  Defaults to `0`.  If set to be non-zero, the
      random number generator is seeded by the given seed.  Otherwise it is
      seeded by a random seed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`).
    output: Output `Tensor` after fractional avg pooling.  Has the same type
      as `value`.
    row_pooling_sequence: A `Tensor` of type `int64`.
    col_pooling_sequence: A `Tensor` of type `int64`.
  """
  # A zero seed keeps the kernel non-deterministic; any other seed is split
  # into the graph-level/op-level pair the kernel expects.
  if seed == 0:
    deterministic, seed1, seed2 = False, 0, 0
  else:
    deterministic = True
    seed1, seed2 = random_seed.get_seed(seed)
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping,
                                        deterministic=deterministic,
                                        seed=seed1, seed2=seed2, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(value,
           filters,
           stride,
           padding,
           use_cudnn_on_gpu=None,
           data_format=None,
           name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape [batch, in_width, in_channels] (for "NWC"
  data format) or [batch, in_channels, in_width] (for "NCW"), and a
  filter / kernel tensor of shape [filter_width, in_channels, out_channels],
  this op reshapes its arguments and invokes `tf.nn.conv2d` to perform the
  equivalent convolution.  With a non-"NC" data format, the input is viewed
  as [batch, 1, in_width, in_channels], the filter as
  [1, filter_width, in_channels, out_channels], and the conv2d result is
  squeezed back to [batch, out_width, out_channels], where out_width follows
  the usual conv2d stride/padding arithmetic.

  Args:
    value: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `value`.
    stride: An `integer`.  The number of entries by which the filter is
      moved right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`.  Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, which stores data as [batch, in_width, in_channels]; `"NCW"`
      stores data as [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Map the 1-D layout onto its 2-D equivalent by inserting a dummy
    # spatial dimension of size 1.
    if data_format in (None, "NHWC", "NWC"):
      data_format = "NHWC"
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    elif data_format in ("NCHW", "NCW"):
      data_format = "NCHW"
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    expanded_value = array_ops.expand_dims(value, spatial_start_dim)
    expanded_filters = array_ops.expand_dims(filters, 0)
    conv_out = gen_nn_ops.conv2d(
        expanded_value,
        expanded_filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format)
    # Drop the dummy spatial dimension again.
    return array_ops.squeeze(conv_out, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              stride,
              padding,
              data_format=None,
              name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Given an input tensor of shape [batch, in_width, in_channels] (for "NWC"
  data format) or [batch, in_channels, in_width] (for "NCW"), and a
  filter / kernel tensor of shape [filter_width, in_channels, out_channels],
  this op reshapes its arguments and invokes `tf.nn.conv2d` to perform the
  equivalent convolution: with a non-"NC" data format the input becomes
  [batch, 1, in_width, in_channels], the filter becomes
  [1, filter_width, in_channels, out_channels], and the result is reshaped
  back to [batch, out_width, out_channels], where out_width follows the
  conv2d stride/padding arithmetic.

  Args:
    input: A 3D `Tensor`.  Must be of type `float16`, `float32`, or
      `float64`.
    filters: A 3D `Tensor`.  Must have the same type as `input`.
    stride: An `integer`.  The number of entries by which the filter is
      moved right at each step.
    padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`.  Defaults to
      `"NWC"`, which stores data as [batch, in_width, in_channels]; `"NCW"`
      stores data as [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  # Delegate to the v1 implementation with cuDNN always enabled; the v2 API
  # no longer exposes `use_cudnn_on_gpu`.
  return conv1d(input,  # pylint: disable=redefined-builtin
                filters,
                stride,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                name=name)
def conv1d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    stride,
    padding="SAME",
    data_format="NWC",
    name=None):
  """The transpose of `conv1d`.

  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv1d` rather than an actual
  deconvolution.

  Args:
    value: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filter: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    stride: An `integer`.  The number of entries by which
      the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [value, filter, output_shape]) as name:
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    # output_shape must statically be a 3-vector ([batch, width, channels]
    # for NWC, [batch, channels, width] for NCW).
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
      raise ValueError("output_shape must have shape (3,), got {}".format(
          output_shape_.get_shape()))
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format_2d = "NHWC"
      axis = 2
    elif data_format == "NCW":
      data_format_2d = "NCHW"
      axis = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    # `axis` indexes the channel dimension of `value`; it has to agree with
    # the filter's in_channels dimension (filter dim 2).
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[2]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[2]))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [3] if reached this point.
      # Only possible when output_shape is a concrete Python value (not a
      # symbolic Tensor): statically check the requested output channels
      # against the filter's output_channels dimension (filter dim 1).
      if not filter.get_shape().dims[1].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[1]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    if data_format_2d == "NHWC":
      output_shape_ = array_ops.concat(
          [output_shape_[:1], [1], output_shape_[1:]], axis=0)
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    else:
      output_shape_ = array_ops.concat(
          [output_shape_[:2], [1], output_shape_[2:]], axis=0)
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    value = array_ops.expand_dims(value, spatial_start_dim)
    filter = array_ops.expand_dims(filter, 0)  # pylint: disable=redefined-builtin
    # conv1d_transpose is the gradient of conv1d w.r.t. its input, so it is
    # implemented via the conv2d input-gradient kernel.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format_2d,
        name=name)
    return array_ops.squeeze(result, [spatial_start_dim])
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Calculates the compute resources needed for Dilation2D."""
  # All three shapes must be statically known to report a flop count.
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Two flops are counted per filter element for every output element.
  kernel_elements = int(filter_shape[0]) * int(filter_shape[1])
  output_count = np.prod(output_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", output_count * kernel_elements * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `kernel` tensor has shape `[kernel_height, kernel_width, depth]`; each
  input channel is eroded independently of the others with its own
  structuring function.  The `output` tensor has shape
  `[batch, out_height, out_width, depth]`, with the spatial dimensions
  determined by the `padding` algorithm.  We currently only support the
  default "NHWC" `data_format`.

  The grayscale morphological 2-D erosion is

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  By duality, the erosion of `value` by `kernel` equals the negation of the
  dilation of `-value` by the reflected `kernel`, which is how this function
  is implemented.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # erosion(value, kernel) == -dilation(-value, reflect(kernel)).
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.

  The `value` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filters_height, filters_width, depth]`; each
  input channel is eroded independently of the others with its own
  structuring function.  The `output` tensor has shape
  `[batch, out_height, out_width, depth]`, with the spatial dimensions
  determined by the `padding` algorithm.  We currently only support the
  default "NHWC" `data_format`.

  The grayscale morphological 2-D erosion is

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - dilations[1] * dy,
                            strides[2] * x - dilations[2] * dx,
                            c] -
                      filters[dy, dx, c]

  By duality, the erosion of `value` by `filters` equals the negation of the
  dilation of `-value` by the reflected `filters`, which is how this function
  is implemented.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `value`.
      3-D with shape `[filters_height, filters_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      1-D of length 4. The stride of the sliding window for each dimension of
      the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      1-D of length 4. The input stride for atrous morphological dilation.
      Must be: `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor`. Has the same type as `value`.
    4-D with shape `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # erosion(value, filters) == -dilation(-value, reflect(filters)).
    reflected_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  Outputs a `batch_size` bool array: entry `out[i]` is `true` iff the
  prediction for the target class is among the top `k` predictions among
  all predictions for example `i`.  Unlike the `TopK` op, ties are handled
  inclusively: if multiple classes share a prediction value that straddles
  the top-`k` boundary, all of those classes count as being in the top `k`.

  More formally, let

    \\(predictions_i\\) be the predictions for all classes for example `i`,
    \\(targets_i\\) be the target class for example `i`,
    \\(out_i\\) be the output for example `i`,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `Tensor` of type `float32`.
      A `batch_size` x `classes` tensor.
    targets: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `batch_size` vector of class ids.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`. Computed Precision at `k` as a `bool Tensor`.
  """
  with ops.name_scope(name, "in_top_k"):
    # Delegate to the v2 kernel, which accepts `k` as a tensor.
    return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # The v2 API takes (targets, predictions) while v1 takes
  # (predictions, targets); delegate with the arguments swapped back.
  return in_top_k(predictions, targets, k, name)
# NOTE(review): the inherited docstring documents v1's (predictions, targets)
# argument order, not the v2 signature above — confirm before relying on it.
in_top_k_v2.__doc__ = in_top_k.__doc__
# Export the generated quantized-op kernels directly under the v1 `tf.nn`
# namespace; they have no additional Python-level wrapping.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| apache-2.0 |
rajrakeshdr/pychess | testing/suicide.py | 21 | 2145 | # -*- coding: UTF-8 -*-
from __future__ import print_function
import sys
import unittest
from pychess.Utils.const import SUICIDECHESS
from pychess.Utils.logic import validate
from pychess.Utils.Move import Move, parseSAN
from pychess.Variants.suicide import SuicideBoard
# Test positions as FEN strings.  Each diagram shows ranks 8..1, top to
# bottom; hollow glyphs are White pieces, solid glyphs are Black.
# ♚ . . ♔ . . . .
# ♙ . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
FEN0 = "k2K4/P7/8/8/8/8/8/8 b - - 0 1"
# ♚ ♔ . . . . . .
# ♙ . . . . . . .
# . ♙ . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
FEN1 = "kK6/P7/1P6/8/8/8/8/8 b - - 0 1"
# ♚ . . ♔ . . . .
# . ♙ . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
FEN2 = "k2K4/1P6/8/8/8/8/8/8 b - - 0 1"
# ♔ . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . . . . . . .
# . . ♚ . . . . .
# . . . . . ♟ . .
# . . . . ♚ . . .
FEN3 = "K7/8/8/8/8/2k5/5p2/4k3 b - - 0 1"
class SuicideTestCase(unittest.TestCase):
    """Tests move validation for the suicide (losing) chess variant."""

    def test_validate(self):
        """Testing validate move in Suicide variant"""
        # NOTE(review): the FEN0-FEN2 scenarios below are disabled; only the
        # FEN3 position is currently asserted.  Confirm whether the disabled
        # assertions still reflect intended suicide-chess rules before
        # re-enabling them.
        #board = SuicideBoard(setup=FEN0)
        #print board
        #self.assertTrue(validate(board, parseSAN(board, 'Kxa7')))
        #self.assertTrue(not validate(board, parseSAN(board, 'Kb8')))
        #self.assertTrue(not validate(board, parseSAN(board, 'Kb7')))
        #board = SuicideBoard(setup=FEN1)
        #print board
        #self.assertTrue(validate(board, parseSAN(board, 'Kxa7')))
        #self.assertTrue(validate(board, parseSAN(board, 'Kxb8')))
        #self.assertTrue(not validate(board, parseSAN(board, 'Kb7')))
        #board = SuicideBoard(setup=FEN2)
        #print board
        #self.assertTrue(not validate(board, parseSAN(board, 'Ka7')))
        #self.assertTrue(not validate(board, parseSAN(board, 'Kb8')))
        #self.assertTrue(validate(board, parseSAN(board, 'Kxb7')))
        # FEN3: the black king on e1 should be allowed to step to d2
        # (presumably legal here because suicide chess has no check rules —
        # verify against the variant implementation).
        board = SuicideBoard(setup=FEN3)
        print(board)
        self.assertTrue(validate(board, parseSAN(board, 'Ked2')))
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/python/framework/common_shapes.py | 32 | 26277 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def scalar_shape(unused_op):
  """Shape function for ops that always produce a single scalar value."""
  # The output shape never depends on the op's inputs.
  del unused_op
  return [tensor_shape.scalar()]
def unchanged_shape(op):
  """Shape function for ops whose output shape equals their first input's."""
  first_input = op.inputs[0]
  return [first_input.get_shape()]
def unchanged_shape_with_rank(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: The exact rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    # The rank constraint is enforced by the shape object itself.
    return [op.inputs[0].get_shape().with_rank(rank)]

  return _shape_fn
def unchanged_shape_with_rank_at_least(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: A lower bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    # Delegate the lower-bound check to the shape object.
    return [op.inputs[0].get_shape().with_rank_at_least(rank)]

  return _shape_fn
def unchanged_shape_with_rank_at_most(rank):
  """Returns a shape function for ops that constrain the rank of their input.

  Args:
    rank: An upper bound on the rank of the input and output.

  Returns:
    A shape function for ops that output a tensor of the same size as their
    input, with a particular rank.
  """

  def _shape_fn(op):
    # Delegate the upper-bound check to the shape object.
    return [op.inputs[0].get_shape().with_rank_at_most(rank)]

  return _shape_fn
def matmul_shape(op):
  """Shape function for a MatMul op."""
  a_shape = op.inputs[0].get_shape().with_rank(2)
  b_shape = op.inputs[1].get_shape().with_rank(2)
  transpose_a = op.get_attr("transpose_a")
  transpose_b = op.get_attr("transpose_b")
  # Select the output and contraction dimensions for each operand
  # according to its transpose flag.
  if transpose_a:
    output_rows, inner_a = a_shape[1], a_shape[0]
  else:
    output_rows, inner_a = a_shape[0], a_shape[1]
  if transpose_b:
    output_cols, inner_b = b_shape[0], b_shape[1]
  else:
    output_cols, inner_b = b_shape[1], b_shape[0]
  # The contraction dimensions must agree.
  inner_a.assert_is_compatible_with(inner_b)
  return [tensor_shape.TensorShape([output_rows, output_cols])]
def get_conv_output_size(input_size, filter_size, strides, padding_type):
  """Returns the spatial size of a n-d convolution/pooling output."""
  # Normalize dimensions to plain ints; unknown dimensions become None and
  # propagate as None through the arithmetic below.
  input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
  filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
  strides = [int(x) for x in strides]
  # Degenerate 1x...x1 input and filter: the output size is the input size.
  if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
    return input_size
  # The comparison is skipped for any dimension whose size is unknown.
  if any(x is not None and y is not None and x > y for x, y in
         zip(filter_size, input_size)):
    raise ValueError("Filter must not be larger than the input: "
                     "Filter: %r Input: %r" % (filter_size, input_size))
  # padding_type is the raw attr value, which is `bytes`.
  if padding_type == b"VALID":

    def _valid(in_dim, k_dim, s_dim):
      # ceil((in - k + 1) / s); None in or None out.
      if in_dim is not None and k_dim is not None:
        return (in_dim - k_dim + s_dim) // s_dim
      else:
        return None

    output_size = [
        _valid(in_dim, k_dim, s_dim)
        for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
    ]
  elif padding_type == b"SAME":

    def _same(in_dim, s_dim):
      # ceil(in / s), independent of the filter size; None in or None out.
      if in_dim is not None:
        return (in_dim + s_dim - 1) // s_dim
      else:
        return None

    output_size = [_same(in_dim, s_dim)
                   for in_dim, s_dim in zip(input_size, strides)]
  else:
    raise ValueError("Invalid padding: %r" % padding_type)
  return tuple(output_size)
def get2d_conv_output_size(input_height, input_width, filter_height,
                           filter_width, row_stride, col_stride, padding_type):
  """Returns the number of rows and columns in a convolution/pooling output."""
  # Bundle the scalar arguments into the (height, width) tuples expected by
  # the n-d helper.
  spatial_input = (input_height, input_width)
  spatial_filter = (filter_height, filter_width)
  spatial_strides = (row_stride, col_stride)
  return get_conv_output_size(spatial_input, spatial_filter, spatial_strides,
                              padding_type)
def conv2d_shape(op):
  """Shape function for a Conv2D op.

  This op has two inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * filter, a 4D tensor with shape = [filter_rows, filter_cols,
    depth_in, depth_out]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "padding" and "strides" attrs.

  Args:
    op: A Conv2D Operation.

  Returns:
    A list containing the Shape of the Conv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  filter_shape = op.inputs[1].get_shape().with_rank(4)
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # The op was built without a data_format attr; treat it as default NHWC.
    data_format = None
  # Attr values arrive as bytes, hence the b"NCHW" comparisons below.
  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  filter_rows = filter_shape[0]
  filter_cols = filter_shape[1]
  depth_out = filter_shape[3]
  # Check that the input depths are compatible.
  input_shape[3].assert_is_compatible_with(filter_shape[2])
  if data_format == b"NCHW":
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride_r, stride_c,
                                              padding)
  output_shape = [batch_size, out_rows, out_cols, depth_out]
  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]
  return [tensor_shape.TensorShape(output_shape)]
def depthwise_conv2d_native_shape(op):
  """Shape function for a DepthwiseConv2D op.

  This op has two inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * filter, a 4D tensor with shape = [filter_rows, filter_cols,
    depth_in, depthwise_multiplier]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
  on the value of the op's "padding" and "strides" attrs.

  Args:
    op: A DepthwiseConv2dNative Operation.

  Returns:
    A list containing the Shape of the DepthwiseConv2DNative output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  filter_shape = op.inputs[1].get_shape().with_rank(4)
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  filter_rows = filter_shape[0]
  filter_cols = filter_shape[1]
  # Each input channel produces `depthwise_multiplier` output channels, so
  # the output depth is depth_in * depthwise_multiplier.
  depth_out = filter_shape[3] * filter_shape[2]
  # Check that the input depths are compatible.
  input_shape[3].assert_is_compatible_with(filter_shape[2])
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  stride = stride_r
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride, stride,
                                              padding)
  return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
  """Shape function for a SeparableConv2D op.

  This op has three inputs:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  * depthwise_filter, a 4D tensor with shape = [filter_rows,
    filter_cols, depth_in, depth_multiplier]
  * pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
    depth_multiplier, depth_out]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "padding" and "strides" attrs.

  Args:
    op: A SeparableConv2D Operation.

  Returns:
    A list containing the Shape of the SeparableConv2D output.

  Raises:
    ValueError: If the shapes of the input or filter are incompatible.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # The depthwise filter's in_channels (dim 2) must match the input depth.
  depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
      tensor_shape.TensorShape([None, None, input_shape[3], None]))
  # The pointwise (1x1) stage consumes depth_in * depth_multiplier channels.
  pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
  pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
      tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  filter_rows = depthwise_filter_shape[0]
  filter_cols = depthwise_filter_shape[1]
  depth_out = pointwise_filter_shape[3]
  stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not yet support "
                     "strides in the batch and depth dimensions.")
  if stride_r != stride_c:
    # TODO(shlens): Add support for this.
    raise ValueError("Current implementation only supports equal length "
                     "strides in the row and column dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  stride = stride_r
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
                                              filter_cols, stride, stride,
                                              padding)
  return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
  """Shape function for an AvgPool op.

  This op has one input:

  * input, a 4D tensor with shape = [batch_size, rows, cols, depth]

  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows and out_cols depend on the
  value of the op's "ksize", "strides", and "padding" attrs.

  Args:
    op: An AvgPool Operation.

  Returns:
    A single-element list containing the Shape of the AvgPool output.

  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # The op was built without a data_format attr; treat it as default NHWC.
    data_format = None
  # Attr values arrive as bytes, hence the b"NCHW" comparisons below.
  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]
  if data_format == b"NCHW":
    ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  depth = input_shape[3]
  if ksize_b != 1 or ksize_d != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch and depth dimensions.")
  if stride_b != 1 or stride_d != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch and depth dimensions.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  padding = op.get_attr("padding")
  out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
                                              ksize_c, stride_r, stride_c,
                                              padding)
  output_shape = [batch_size, out_rows, out_cols, depth]
  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]
  return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
  """Shape function for a MaxPool op.
  This op has one input:
  * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
  The output is a 4D tensor with shape = [batch_size, out_rows,
  out_cols, depth_out], where out_rows, out_cols, and depth_out depend
  on the value of the op's "ksize", "strides", and "padding" attrs.
  Args:
    op: A MaxPool Operation.
  Returns:
    A single-element list containing the Shape of the MaxPool output.
  Raises:
    ValueError: If the shape of the input is invalid or incompatible with
      the values of the attrs.
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # get_attr raises ValueError when the op carries no data_format attr;
    # fall through to the default (NHWC) handling below.
    data_format = None
  if data_format == b"NCHW":
    # Convert input shape to the default NHWC for inference.
    input_shape = [input_shape[0], input_shape[2], input_shape[3],
                   input_shape[1]]
  if data_format == b"NCHW":
    # The ksize/strides attrs are stored in the op's native NCHW order.
    ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
    stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
  else:
    ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
    stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
  batch_size = input_shape[0]
  in_rows = input_shape[1]
  in_cols = input_shape[2]
  depth = input_shape[3]
  if ksize_b != 1:
    raise ValueError("Current implementation does not support pooling "
                     "in the batch dimension.")
  if stride_b != 1:
    raise ValueError("Current implementation does not support strides "
                     "in the batch dimension.")
  # Exactly one of spatial pooling (depth window == 1) or depthwise pooling
  # (spatial window == 1x1) is allowed.
  if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
    raise ValueError("MaxPooling supports exactly one of pooling across depth "
                     "or pooling across width/height.")
  # TODO(mrry,shlens): Raise an error if the stride would cause
  # information in the input to be ignored. This will require a change
  # in the kernel implementation.
  if ksize_d == 1:
    # Spatial (rows/cols) pooling: depth passes through unchanged.
    padding = op.get_attr("padding")
    out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
                                                ksize_c, stride_r, stride_c,
                                                padding)
    output_shape = [batch_size, out_rows, out_cols, depth]
  else:
    # Depthwise pooling: rows/cols pass through; the depth window must tile
    # the input depth exactly and advance by its own size.
    if depth % ksize_d > 0:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to evenly divide the input depth.")
    if stride_d != ksize_d:
      raise ValueError("Depthwise max pooling requires the depth window "
                       "to equal the depth stride.")
    output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
  if data_format == b"NCHW":
    # Convert output shape back to NCHW.
    output_shape = [output_shape[0], output_shape[3], output_shape[1],
                    output_shape[2]]
  return [tensor_shape.TensorShape(output_shape)]
def no_outputs(unused_op):
  """Shape function for ops that produce no outputs.

  Args:
    unused_op: An Operation; ignored.

  Returns:
    An empty list, since there are no output shapes to describe.
  """
  return []
def unknown_shape(op):
  """Shape function for ops whose output shapes cannot be inferred.

  Args:
    op: the Operation whose outputs should all be marked unknown.

  Returns:
    A list containing one fresh unknown TensorShape per output of `op`.
  """
  shapes = []
  for _ in op.outputs:
    shapes.append(tensor_shape.unknown_shape())
  return shapes
def broadcast_shape(shape_x, shape_y):
  """Returns the broadcasted shape between `shape_x` and `shape_y`.

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    A `TensorShape` representing the broadcasted shape.

  Raises:
    ValueError: If the two shapes can not be broadcasted.
  """
  if shape_x.ndims is None or shape_y.ndims is None:
    # With an unknown rank on either side, nothing can be said about the result.
    return tensor_shape.unknown_shape()
  # Align the two dimension lists on the right, padding the shorter one with
  # size-1 dimensions, mirroring the numpy broadcasting rules:
  # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
  one = tensor_shape.Dimension(1)
  aligned = reversed(list(six.moves.zip_longest(
      reversed(shape_x.dims),
      reversed(shape_y.dims),
      fillvalue=one)))
  out_dims = []
  for dim_x, dim_y in aligned:
    x_val = dim_x.value
    y_val = dim_y.value
    if x_val is None or y_val is None:
      # One or both dimensions is unknown. If either dimension is greater than
      # 1, we assume that the program is correct, and the other dimension will
      # be broadcast to match it.
      # TODO(mrry): If we eliminate the shape checks in C++, we must still
      # assert that the unknown dim is either 1 or the same as the known dim.
      if x_val is not None and x_val > 1:
        out_dims.append(dim_x)
      elif y_val is not None and y_val > 1:
        out_dims.append(dim_y)
      else:
        out_dims.append(None)
    elif x_val == 1:
      # Size-1 dimensions broadcast to the other side's size.
      out_dims.append(dim_y)
    elif y_val == 1:
      out_dims.append(dim_x)
    elif x_val == y_val:
      # Equal known sizes: merge so any extra Dimension metadata is combined.
      out_dims.append(dim_x.merge_with(dim_y))
    else:
      raise ValueError("Incompatible shapes for broadcasting: %s and %s"
                       % (shape_x, shape_y))
  return tensor_shape.TensorShape(out_dims)
def call_cpp_shape_fn(op,
                      input_tensors_needed=None,
                      input_tensors_as_shapes_needed=None,
                      debug_python_shape_fn=None,
                      require_shape_fn=True):
  """A shape function that delegates to the registered C++ shape function.
  Args:
    op: the node in the graph for which to compute output shapes.
    input_tensors_needed: a list of input tensor indices for which to compute
      the input tensor's value and pass to the C++ shape function.
    input_tensors_as_shapes_needed: a list of input tensor indices for which to
      compute the constant_value_as_shape and pass to the C++ shape function.
    debug_python_shape_fn: For testing only during migration to using
      call_cpp_shape_fn. Do not submit calls that set this,
      as the comparison is slow. If non-None, the python shape function;
      this function will be called and its output compared to that of
      the C++ shape function.
    require_shape_fn: If true, and the C++ shape function is not registered
      in the current binary then an exception is raised; otherwise, if the
      C++ shape function is not registered then unknown_shape is used.
  Returns:
    A dictionary with the following keys:
      shapes: A TensorShape list of the output shapes of the op, as computed
        using the C++ shape inference function registered for the op.
      handle_shapes: A TensorShape list of the shapes for handle outputs, if
        any.
      handle_dtypes: A list of DataType enums for the handle outputs, if any.
  Raises:
    ValueError: If the C++ shape function returned an error (e.g. because the
      shapes of the inputs are of the wrong rank or otherwise incompatible
      according to the shape function).
    RuntimeError: If the C++ shape function is not registered and
      <require_shape_fn> is True.
  """
  if op.type == "Const":
    # To avoid serializing large constants, we special-case constant
    # here, even though it has a C++ shape function.  When Python
    # calls the C / C-API directly, we should be able to remove this.
    return {
        "shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
        "handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
        "handle_dtypes": [types_pb2.DT_INVALID]
    }
  input_tensors_needed = input_tensors_needed or []
  input_tensors_as_shapes_needed = input_tensors_as_shapes_needed or []
  # Re-run C++ inference until it stops asking for additional constant-folded
  # inputs (reported back via the "inputs_needed" entry of the result dict).
  while True:
    res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
                                  input_tensors_as_shapes_needed,
                                  debug_python_shape_fn, require_shape_fn)
    if not isinstance(res, dict):
      # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
      return res
    # See if we need to evaluate some inputs.
    if not res["inputs_needed"]:
      return res
    p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
    p = p.FromString(res["inputs_needed"])
    changed = False
    for idx in p.input_tensors_needed:
      if idx not in input_tensors_needed:
        input_tensors_needed.append(idx)
        changed = True
    for idx in p.input_tensors_as_shapes_needed:
      if idx not in input_tensors_as_shapes_needed:
        input_tensors_as_shapes_needed.append(idx)
        changed = True
    if not changed:
      # The C++ function asked only for inputs we already supply; return to
      # avoid looping forever.
      return res
def _call_cpp_shape_fn_impl(
    op, input_tensors_needed,
    input_tensors_as_shapes_needed,
    debug_python_shape_fn, require_shape_fn):
  """Core implementation of call_cpp_shape_fn.

  Runs one round of C++ shape inference for `op`; see call_cpp_shape_fn for
  the meaning of the arguments and the structure of the returned dict (which
  additionally carries an "inputs_needed" entry that the caller uses to decide
  whether to re-run with more constant-folded inputs).
  """
  node_def_str = op.node_def.SerializeToString()

  def tensor_to_inference_result(t):
    # Serialize a tensor's (shape, handle shape, handle dtype) triple into a
    # CppShapeInferenceResult proto for the C++ side.
    r = cpp_shape_inference_pb2.CppShapeInferenceResult()
    r.shape.CopyFrom(t.get_shape().as_proto())
    # pylint: disable=protected-access
    r.handle_shape.CopyFrom(t._handle_shape)
    r.handle_dtype = t._handle_dtype
    # pylint: enable=protected-access
    return r.SerializeToString()
  input_shapes = [tensor_to_inference_result(i) for i in op.inputs]

  # Constant-fold only the inputs the C++ function asked for; the rest stay
  # None (unavailable).
  input_tensors = [None for i in input_shapes]
  for idx in input_tensors_needed:
    v = tensor_util.constant_value(op.inputs[idx])
    if v is not None:
      input_tensors[idx] = np.asarray(v)
  serialized_unknown_shape = (
      tensor_shape.TensorShape(None).as_proto().SerializeToString())
  arr = [serialized_unknown_shape for i in input_shapes]
  for idx in input_tensors_as_shapes_needed:
    s = tensor_util.constant_value_as_shape(op.inputs[idx])
    if s is not None:
      arr[idx] = s.as_proto().SerializeToString()
  input_tensors_as_shapes = arr

  missing_shape_fn = False
  try:
    with errors.raise_exception_on_not_ok_status() as status:
      output = pywrap_tensorflow.RunCppShapeInference(
          node_def_str, input_shapes, input_tensors, input_tensors_as_shapes,
          status)
  except errors.InvalidArgumentError as err:
    # A missing C++ shape function surfaces as InvalidArgumentError with this
    # specific message; everything else is a genuine shape error.
    if err.message.startswith("No shape inference function exists for op"):
      missing_shape_fn = True
    else:
      raise ValueError(err.message)
  if missing_shape_fn:
    if require_shape_fn:
      raise RuntimeError(
          "No C++ shape function registered for standard op: %s" % op.type)
    return unknown_shape(op)

  # The final element of `output` is the serialized "inputs needed" request;
  # the rest are per-output CppShapeInferenceResult protos.
  output_shapes = output[:-1]
  # Convert TensorShapeProto values in output_shapes.
  result_protos = [
      cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
      for s in output_shapes
  ]
  result = [r.shape for r in result_protos]
  result_handle_shapes = [r.handle_shape for r in result_protos]
  result_handle_dtypes = [r.handle_dtype for r in result_protos]

  if debug_python_shape_fn:
    try:
      python_result = [tensor_shape.as_shape(s)
                       for s in debug_python_shape_fn(op)]
    except Exception as err:
      # Typo fixes: "returned an error" / "function" (messages previously read
      # "return error" and "functon").
      raise AssertionError("Python shape function returned an error but "
                           "C++ shape function did not: %s" % str(err))
    result_as_shapes = [tensor_shape.as_shape(s) for s in result]
    if str(result_as_shapes) != str(python_result):
      raise ValueError(
          ("Python vs CPP shape mismatch.  "
           "CPP: %s vs python: %s on node %s "
           "with input shapes %s") % (
               str(result_as_shapes), str(python_result), str(op.node_def),
               ",".join([str(i.get_shape()) for i in op.inputs])))

  return {"shapes": result,
          "handle_shapes": result_handle_shapes,
          "handle_dtypes": result_handle_dtypes,
          "inputs_needed": output[-1]}
# Install call_cpp_shape_fn as the ops module's C++ shape-function hook.
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
| gpl-3.0 |
huang4fstudio/django | django/utils/decorators.py | 60 | 5921 | "Functions that help with dynamically creating decorators for views."
try:
from contextlib import ContextDecorator
except ImportError:
ContextDecorator = None
from functools import WRAPPER_ASSIGNMENTS, update_wrapper, wraps
from django.utils import six
class classonlymethod(classmethod):
    """
    A classmethod variant that may only be reached through the class.

    Accessing the attribute via an instance raises AttributeError instead
    of returning a bound method.
    """
    def __get__(self, instance, owner):
        if instance is None:
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method is available only on the class, not on instances.")
def method_decorator(decorator):
    """
    Converts a function decorator into a method decorator.

    The wrapped decorator is re-applied on every call to a freshly bound
    version of the method, so it never sees the implicit 'self' argument.
    """
    # 'func' is a function at the time it is passed to _dec, but will eventually
    # be a method of the class it is defined on.
    def _dec(func):
        def _wrapper(self, *args, **kwargs):
            @decorator
            def bound_func(*args2, **kwargs2):
                # Bind 'func' to this instance before delegating.
                return func.__get__(self, type(self))(*args2, **kwargs2)
            # bound_func has the signature that 'decorator' expects i.e. no
            # 'self' argument, but it is a closure over self so it can call
            # 'func' correctly.
            return bound_func(*args, **kwargs)
        # In case 'decorator' adds attributes to the function it decorates, we
        # want to copy those. We don't have access to bound_func in this scope,
        # but we can cheat by using it on a dummy function.
        @decorator
        def dummy(*args, **kwargs):
            pass
        update_wrapper(_wrapper, dummy)
        # Need to preserve any existing attributes of 'func', including the name.
        update_wrapper(_wrapper, func)
        return _wrapper
    update_wrapper(_dec, decorator, assigned=available_attrs(decorator))
    # Change the name to aid debugging.
    if hasattr(decorator, '__name__'):
        _dec.__name__ = 'method_decorator(%s)' % decorator.__name__
    else:
        # Callable-class decorators have no __name__; use the class name.
        _dec.__name__ = 'method_decorator(%s)' % decorator.__class__.__name__
    return _dec
def decorator_from_middleware_with_args(middleware_class):
    """
    Build a view-decorator factory from a middleware class.

    The returned callable accepts the constructor arguments for
    ``middleware_class`` and produces the actual view decorator::

        cache_page = decorator_from_middleware_with_args(CacheMiddleware)
        # ...
        @cache_page(3600)
        def my_view(request):
            # ...
    """
    return make_middleware_decorator(middleware_class)
def decorator_from_middleware(middleware_class):
    """
    Turn a middleware class (not an instance) into a view decorator.

    This lets middleware functionality be applied on a per-view basis. The
    middleware is instantiated with no constructor arguments; use
    decorator_from_middleware_with_args when parameters are needed.
    """
    return make_middleware_decorator(middleware_class)()
def available_attrs(fn):
    """
    Return the functools-wrappable attributes present on a callable.

    Under Python 3 every name in WRAPPER_ASSIGNMENTS is safe to copy. Under
    Python 2 some callables lack attributes such as __name__, so only the
    attributes actually present on `fn` are returned — a workaround for
    http://bugs.python.org/issue3445.
    """
    if not six.PY3:
        return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
    return WRAPPER_ASSIGNMENTS
def make_middleware_decorator(middleware_class):
    """
    Build a decorator factory from `middleware_class`.

    The factory accepts the middleware's constructor arguments and returns a
    view decorator that runs whichever of the middleware's process_request /
    process_view / process_exception / process_template_response /
    process_response hooks exist around a single view.
    """
    def _make_decorator(*m_args, **m_kwargs):
        middleware = middleware_class(*m_args, **m_kwargs)
        def _decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        # Middleware short-circuited with its own response.
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception as e:
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            # Exception handled; return the substitute response.
                            return result
                    raise
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(middleware, 'process_template_response'):
                        response = middleware.process_template_response(request, response)
                    # Defer running of process_response until after the template
                    # has been rendered:
                    if hasattr(middleware, 'process_response'):
                        callback = lambda response: middleware.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(middleware, 'process_response'):
                        return middleware.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
if ContextDecorator is None:
    # Fallback for Python < 3.2, where contextlib has no ContextDecorator.
    # See https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
    class ContextDecorator(object):
        """
        A base class that enables a context manager to also be used as a decorator.
        """
        def __call__(self, func):
            @wraps(func, assigned=available_attrs(func))
            def wrapped(*args, **kwargs):
                # Enter the context manager around every call to `func`.
                with self:
                    return func(*args, **kwargs)
            return wrapped
class classproperty(object):
    """
    Descriptor exposing a method as a read-only attribute computed from the
    owner class, accessible on both the class and its instances.
    """
    def __init__(self, method=None):
        self.fget = method

    def __get__(self, instance, owner):
        # Always evaluated against the owner class; any instance is ignored.
        return self.fget(owner)

    def getter(self, method):
        """Install `method` as the computing callable; returns self for chaining."""
        self.fget = method
        return self
| bsd-3-clause |
gangadhar-kadam/prjapp | patches/march_2013/p05_payment_reconciliation.py | 30 | 1321 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
	# delete wrong gle entries created due to a bug in make_gl_entries of Account Controller
	# when using payment reconciliation
	#
	# Find vouchers with mirror-image duplicate GL Entries: two entries of the
	# same Journal Voucher, modified on or after 2013-03-11 on the same day,
	# against the same account / posting date / against-voucher / remarks,
	# where one entry's debit equals the other's credit (and vice versa).
	# The gl1.name > gl2.name condition counts each pair only once.
	res = webnotes.conn.sql_list("""select distinct gl1.voucher_no
		from `tabGL Entry` gl1, `tabGL Entry` gl2
		where
		date(gl1.modified) >= "2013-03-11"
		and date(gl1.modified) = date(gl2.modified)
		and gl1.voucher_no = gl2.voucher_no
		and gl1.voucher_type = "Journal Voucher"
		and gl1.voucher_type = gl2.voucher_type
		and gl1.posting_date = gl2.posting_date
		and gl1.account = gl2.account
		and ifnull(gl1.is_cancelled, 'No') = 'No' and ifnull(gl2.is_cancelled, 'No') = 'No'
		and ifnull(gl1.against_voucher, '') = ifnull(gl2.against_voucher, '')
		and ifnull(gl1.against_voucher_type, '') = ifnull(gl2.against_voucher_type, '')
		and gl1.remarks = gl2.remarks
		and ifnull(gl1.debit, 0) = ifnull(gl2.credit, 0)
		and ifnull(gl1.credit, 0) = ifnull(gl2.debit, 0)
		and gl1.name > gl2.name""")
	for r in res:
		# Cancel every GL Entry of the affected voucher, then rebuild them by
		# re-running make_gl_entries on the Journal Voucher.
		webnotes.conn.sql("""update `tabGL Entry` set `is_cancelled`='Yes'
			where voucher_type='Journal Voucher' and voucher_no=%s""", r)
		jv = webnotes.bean("Journal Voucher", r)
		jv.run_method("make_gl_entries")
| agpl-3.0 |
40223117cda/2015cdaw13 | static/Brython3.1.3-20150514-095342/Lib/unittest/runner.py | 637 | 7485 | """Running tests"""
import sys
import time
import warnings
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.
    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        self.descriptions = descriptions
        # verbosity > 1: one line per test; verbosity == 1: one dot per test.
        self.showAll = verbosity > 1
        self.dots = verbosity == 1

    def getDescription(self, test):
        # Prefer "<test id>\n<first docstring line>" when descriptions are on.
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        return str(test)

    def _report(self, verbose_text, dot_char):
        # Emit a single test's outcome in whichever output mode is active.
        if self.showAll:
            self.stream.writeln(verbose_text)
        elif self.dots:
            self.stream.write(dot_char)
            self.stream.flush()

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        self._report("ok", '.')

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        self._report("ERROR", 'E')

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        self._report("FAIL", 'F')

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        self._report("skipped {0!r}".format(reason), "s")

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        self._report("expected failure", "x")

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        self._report("unexpected success", "u")

    def printErrors(self):
        # Separate the progress output from the error details.
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
class TextTestRunner(object):
    """A test runner class that displays results in textual form.
    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult
    def __init__(self, stream=None, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None, warnings=None):
        if stream is None:
            # Test output goes to stderr by default.
            stream = sys.stderr
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        self.warnings = warnings
        if resultclass is not None:
            self.resultclass = resultclass
    def _makeResult(self):
        # Hook point: the result class may be overridden per instance.
        return self.resultclass(self.stream, self.descriptions, self.verbosity)
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy. The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings('module',
                            category=DeprecationWarning,
                            message='Please use assert\w+ instead.')
            startTime = time.time()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                # stopTestRun runs even when the suite raised.
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        # Custom result classes may lack these attributes; default to zero.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            # map() is lazy on Python 3; the unpacking below forces it.
            expectedFails, unexpectedSuccesses, skipped = results
        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
| gpl-3.0 |
sienatime/python_koans | python3/koans/about_inheritance.py | 100 | 2650 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutInheritance(Koan):
class Dog:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
def bark(self):
return "WOOF"
class Chihuahua(Dog):
def wag(self):
return "happy"
def bark(self):
return "yip"
def test_subclasses_have_the_parent_as_an_ancestor(self):
self.assertEqual(__, issubclass(self.Chihuahua, self.Dog))
def test_this_all_classes_in_python_3_ultimately_inherit_from_object_class(self):
self.assertEqual(__, issubclass(self.Chihuahua, object))
# Note: This isn't the case in Python 2. In that version you have
# to inherit from a built in class or object explicitly
def test_instances_inherit_behavior_from_parent_class(self):
chico = self.Chihuahua("Chico")
self.assertEqual(__, chico.name)
def test_subclasses_add_new_behavior(self):
chico = self.Chihuahua("Chico")
self.assertEqual(__, chico.wag())
fido = self.Dog("Fido")
with self.assertRaises(___): fido.wag()
def test_subclasses_can_modify_existing_behavior(self):
chico = self.Chihuahua("Chico")
self.assertEqual(__, chico.bark())
fido = self.Dog("Fido")
self.assertEqual(__, fido.bark())
# ------------------------------------------------------------------
class BullDog(Dog):
def bark(self):
return super().bark() + ", GRR"
# Note, super() is much simpler to use in Python 3!
def test_subclasses_can_invoke_parent_behavior_via_super(self):
ralph = self.BullDog("Ralph")
self.assertEqual(__, ralph.bark())
# ------------------------------------------------------------------
class GreatDane(Dog):
def growl(self):
return super().bark() + ", GROWL"
def test_super_works_across_methods(self):
george = self.GreatDane("George")
self.assertEqual(__, george.growl())
# ---------------------------------------------------------
class Pug(Dog):
def __init__(self, name):
pass
class Greyhound(Dog):
def __init__(self, name):
super().__init__(name)
def test_base_init_does_not_get_called_automatically(self):
snoopy = self.Pug("Snoopy")
with self.assertRaises(___): name = snoopy.name
def test_base_init_has_to_be_called_explicitly(self):
boxer = self.Greyhound("Boxer")
self.assertEqual(__, boxer.name) | mit |
ZuluPro/libcloud | example_compute.py | 53 | 1403 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
EC2 = get_driver(Provider.EC2)
Rackspace = get_driver(Provider.RACKSPACE)

# NOTE: placeholder credentials; replace them before running this example.
drivers = [EC2('access key id', 'secret key', region='us-east-1'),
           Rackspace('username', 'api key', region='iad')]

# Flatten the per-driver listings into a single list of nodes.
# (Each driver's list_nodes() returns a list; without flattening, the
# `n.name` lookup below would fail on the inner lists.)
nodes = [node for driver in drivers for node in driver.list_nodes()]
print(nodes)
# [ <Node: provider=Amazon, status=RUNNING, name=bob, ip=1.2.3.4.5>,
# <Node: provider=Rackspace, status=REBOOT, name=korine, ip=6.7.8.9.10>, ... ]
# grab the node named "test"
node = [n for n in nodes if n.name == 'test'][0]
# reboot "test"
node.reboot()
| apache-2.0 |
wd5/jangr | django/core/urlresolvers.py | 159 | 17094 | """
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
import re
from threading import local
from django.http import Http404
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import iri_to_uri, force_unicode, smart_str
from django.utils.functional import memoize
from django.utils.importlib import import_module
from django.utils.regex_helper import normalize
_resolver_cache = {} # Maps URLconf modules to RegexURLResolver instances.
_callable_cache = {} # Maps view and url pattern names to their view functions.
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
    """The outcome of resolving a URL path: the view plus its call arguments."""
    def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.app_name = app_name
        # Keep only non-empty namespace entries.
        self.namespaces = [x for x in namespaces if x] if namespaces else []
        if not url_name:
            # No explicit name: derive a dotted path to the view callable.
            if hasattr(func, '__name__'):
                # A function
                url_name = '.'.join([func.__module__, func.__name__])
            else:
                # An instance of a callable class
                url_name = '.'.join([func.__class__.__module__, func.__class__.__name__])
        self.url_name = url_name

    @property
    def namespace(self):
        return ':'.join(self.namespaces)

    @property
    def view_name(self):
        return ':'.join([x for x in [self.namespace, self.url_name] if x])

    def __getitem__(self, index):
        # Allow tuple-style unpacking: (func, args, kwargs).
        return (self.func, self.args, self.kwargs)[index]

    def __repr__(self):
        return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name='%s', app_name='%s', namespace='%s')" % (
            self.func, self.args, self.kwargs, self.url_name, self.app_name, self.namespace)
class Resolver404(Http404):
    # Http404 subclass used during URL resolution when a path fails to match;
    # RegexURLResolver.resolve catches it to keep trying sibling patterns.
    pass
class NoReverseMatch(Exception):
    """Raised when URL reversing cannot find a matching pattern."""
    # Don't make this raise an error when used in a template.
    silent_variable_failure = True
def get_callable(lookup_view, can_fail=False):
    """
    Convert a string version of a function name to the callable object.
    If the lookup_view is not an import path, it is assumed to be a URL pattern
    label and the original string is returned.
    If can_fail is True, lookup_view might be a URL pattern label, so errors
    during the import fail and the string is returned.
    """
    if not callable(lookup_view):
        try:
            # Bail early for non-ASCII strings (they can't be functions).
            # encode('ascii') raises UnicodeEncodeError for such strings,
            # which is caught below to fall through and return the original.
            lookup_view = lookup_view.encode('ascii')
            mod_name, func_name = get_mod_func(lookup_view)
            if func_name != '':
                lookup_view = getattr(import_module(mod_name), func_name)
                if not callable(lookup_view):
                    raise AttributeError("'%s.%s' is not a callable." % (mod_name, func_name))
        except (ImportError, AttributeError):
            # With can_fail, treat import problems as "this was a label".
            if not can_fail:
                raise
        except UnicodeEncodeError:
            pass
    return lookup_view
# Memoize on (lookup_view, can_fail) in the module-level _callable_cache.
get_callable = memoize(get_callable, _callable_cache, 1)
def get_resolver(urlconf):
    # Fall back to the project-wide ROOT_URLCONF when no urlconf is given.
    if urlconf is None:
        from django.conf import settings
        urlconf = settings.ROOT_URLCONF
    return RegexURLResolver(r'^/', urlconf)
# Cache one resolver per urlconf in the module-level _resolver_cache.
get_resolver = memoize(get_resolver, _resolver_cache, 1)
def get_mod_func(callback):
    """
    Split a dotted path into (module path, attribute name).

    'django.views.news.stories.story_detail' becomes
    ('django.views.news.stories', 'story_detail'); a string with no dot is
    returned whole with an empty attribute part.
    """
    mod_name, sep, func_name = callback.rpartition('.')
    if not sep:
        # No dot at all: the entire string is the "module" part.
        return callback, ''
    return mod_name, func_name
class RegexURLPattern(object):
    """A single compiled URL pattern mapping a regex to a view callable."""
    def __init__(self, regex, callback, default_args=None, name=None):
        # regex is a string representing a regular expression.
        # callback is either a string like 'foo.views.news.stories.story_detail'
        # which represents the path to a module and a view function name, or a
        # callable object (view).
        self.regex = re.compile(regex, re.UNICODE)
        if callable(callback):
            self._callback = callback
        else:
            # Import of dotted-path callbacks is deferred until first access
            # via the `callback` property below.
            self._callback = None
            self._callback_str = callback
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)
    def add_prefix(self, prefix):
        """
        Adds the prefix string to a string-based callback.
        """
        if not prefix or not hasattr(self, '_callback_str'):
            return
        self._callback_str = prefix + '.' + self._callback_str
    def resolve(self, path):
        # Returns a ResolverMatch when `path` matches this pattern's regex;
        # implicitly returns None otherwise.
        match = self.regex.search(path)
        if match:
            # If there are any named groups, use those as kwargs, ignoring
            # non-named groups. Otherwise, pass all non-named arguments as
            # positional arguments.
            kwargs = match.groupdict()
            if kwargs:
                args = ()
            else:
                args = match.groups()
            # In both cases, pass any extra_kwargs as **kwargs.
            kwargs.update(self.default_args)
            return ResolverMatch(self.callback, args, kwargs, self.name)
    def _get_callback(self):
        # Lazily import a dotted-path callback; import problems are re-raised
        # as ViewDoesNotExist with a message naming the failing piece.
        if self._callback is not None:
            return self._callback
        try:
            self._callback = get_callable(self._callback_str)
        except ImportError, e:
            mod_name, _ = get_mod_func(self._callback_str)
            raise ViewDoesNotExist("Could not import %s. Error was: %s" % (mod_name, str(e)))
        except AttributeError, e:
            mod_name, func_name = get_mod_func(self._callback_str)
            raise ViewDoesNotExist("Tried %s in module %s. Error was: %s" % (func_name, mod_name, str(e)))
        return self._callback
    callback = property(_get_callback)
class RegexURLResolver(object):
    """Resolves a request path against a tree of URL patterns.

    A resolver wraps a URLconf module (or an explicit pattern list) and
    supports both forward matching (resolve) and reverse URL
    construction (reverse), including namespace and app-name handling.
    """

    def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        # regex is a string representing a regular expression.
        # urlconf_name is a string representing the module containing URLconfs.
        self.regex = re.compile(regex, re.UNICODE)
        self.urlconf_name = urlconf_name
        if not isinstance(urlconf_name, basestring):
            # An already-loaded module (or pattern list) was passed directly.
            self._urlconf_module = self.urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        self.namespace = namespace
        self.app_name = app_name
        # Lazily built lookup tables; filled in by _populate() on first
        # access through the corresponding properties below.
        self._reverse_dict = None
        self._namespace_dict = None
        self._app_dict = None

    def __repr__(self):
        return '<%s %s (%s:%s) %s>' % (self.__class__.__name__, self.urlconf_name, self.app_name, self.namespace, self.regex.pattern)

    def _populate(self):
        """Build the reverse/namespace/app lookup tables in a single pass."""
        lookups = MultiValueDict()
        namespaces = {}
        apps = {}
        # Iterate in reverse so that earlier patterns end up with precedence
        # in the MultiValueDict lookups.
        for pattern in reversed(self.url_patterns):
            p_pattern = pattern.regex.pattern
            if p_pattern.startswith('^'):
                p_pattern = p_pattern[1:]
            if isinstance(pattern, RegexURLResolver):
                if pattern.namespace:
                    namespaces[pattern.namespace] = (p_pattern, pattern)
                    if pattern.app_name:
                        apps.setdefault(pattern.app_name, []).append(pattern.namespace)
                else:
                    # Anonymous include: fold the child's entries into this
                    # resolver's tables, prefixing each with our pattern.
                    parent = normalize(pattern.regex.pattern)
                    for name in pattern.reverse_dict:
                        for matches, pat in pattern.reverse_dict.getlist(name):
                            new_matches = []
                            for piece, p_args in parent:
                                new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
                            lookups.appendlist(name, (new_matches, p_pattern + pat))
                    for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
                        namespaces[namespace] = (p_pattern + prefix, sub_pattern)
                    for app_name, namespace_list in pattern.app_dict.items():
                        apps.setdefault(app_name, []).extend(namespace_list)
            else:
                # Leaf pattern: index it both by callback and (if any) by name.
                bits = normalize(p_pattern)
                lookups.appendlist(pattern.callback, (bits, p_pattern))
                if pattern.name is not None:
                    lookups.appendlist(pattern.name, (bits, p_pattern))
        self._reverse_dict = lookups
        self._namespace_dict = namespaces
        self._app_dict = apps

    def _get_reverse_dict(self):
        if self._reverse_dict is None:
            self._populate()
        return self._reverse_dict
    # Maps view name/callable -> list of (match-bits, pattern) entries.
    reverse_dict = property(_get_reverse_dict)

    def _get_namespace_dict(self):
        if self._namespace_dict is None:
            self._populate()
        return self._namespace_dict
    # Maps instance namespace -> (url prefix, sub-resolver).
    namespace_dict = property(_get_namespace_dict)

    def _get_app_dict(self):
        if self._app_dict is None:
            self._populate()
        return self._app_dict
    # Maps application name -> list of instance namespaces.
    app_dict = property(_get_app_dict)

    def resolve(self, path):
        """Match *path*; return a ResolverMatch or raise Resolver404.

        The Resolver404 payload carries the list of patterns tried so
        that debug error pages can display them.
        """
        tried = []
        match = self.regex.search(path)
        if match:
            # Strip the portion this resolver consumed before delegating.
            new_path = path[match.end():]
            for pattern in self.url_patterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404, e:
                    # Accumulate the child's attempts for the error page.
                    sub_tried = e.args[0].get('tried')
                    if sub_tried is not None:
                        tried.extend([[pattern] + t for t in sub_tried])
                    else:
                        tried.append([pattern])
                else:
                    if sub_match:
                        # Merge our captured groups and default kwargs with the
                        # sub-match's kwargs; the sub-match's values win.
                        sub_match_dict = dict([(smart_str(k), v) for k, v in match.groupdict().items()])
                        sub_match_dict.update(self.default_kwargs)
                        for k, v in sub_match.kwargs.iteritems():
                            sub_match_dict[smart_str(k)] = v
                        return ResolverMatch(sub_match.func, sub_match.args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces)
                    tried.append([pattern])
            raise Resolver404({'tried': tried, 'path': new_path})
        raise Resolver404({'path' : path})

    def _get_urlconf_module(self):
        try:
            return self._urlconf_module
        except AttributeError:
            # urlconf_name is a dotted module path; import it on demand.
            self._urlconf_module = import_module(self.urlconf_name)
            return self._urlconf_module
    urlconf_module = property(_get_urlconf_module)

    def _get_url_patterns(self):
        # Accept either a module exposing `urlpatterns` or a bare iterable.
        patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
        try:
            iter(patterns)
        except TypeError:
            raise ImproperlyConfigured("The included urlconf %s doesn't have any patterns in it" % self.urlconf_name)
        return patterns
    url_patterns = property(_get_url_patterns)

    def _resolve_special(self, view_type):
        """Return (callable, kwargs) for the handler404/handler500 view."""
        callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
        if not callback:
            # No handler specified in file; use default
            # Lazy import, since urls.defaults imports this file
            from django.conf.urls import defaults
            callback = getattr(defaults, 'handler%s' % view_type)
        try:
            return get_callable(callback), {}
        except (ImportError, AttributeError), e:
            raise ViewDoesNotExist("Tried %s. Error was: %s" % (callback, str(e)))

    def resolve404(self):
        return self._resolve_special('404')

    def resolve500(self):
        return self._resolve_special('500')

    def reverse(self, lookup_view, *args, **kwargs):
        """Construct a URL for *lookup_view* from args OR kwargs.

        Tries every registered possibility for the view; raises
        NoReverseMatch when none can be filled with the given arguments.
        """
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        try:
            lookup_view = get_callable(lookup_view, True)
        except (ImportError, AttributeError), e:
            raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
        possibilities = self.reverse_dict.getlist(lookup_view)
        for possibility, pattern in possibilities:
            for result, params in possibility:
                if args:
                    # Positional reverse: arity must match exactly.
                    if len(args) != len(params):
                        continue
                    unicode_args = [force_unicode(val) for val in args]
                    candidate = result % dict(zip(params, unicode_args))
                else:
                    # Keyword reverse: the key sets must match exactly.
                    if set(kwargs.keys()) != set(params):
                        continue
                    unicode_kwargs = dict([(k, force_unicode(v)) for (k, v) in kwargs.items()])
                    candidate = result % unicode_kwargs
                # Only accept the candidate if it still satisfies the pattern.
                if re.search(u'^%s' % pattern, candidate, re.UNICODE):
                    return candidate
        # lookup_view can be URL label, or dotted path, or callable, Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
                "arguments '%s' not found." % (lookup_view_s, args, kwargs))
def resolve(path, urlconf=None):
    """Resolve *path* with the given URLconf, defaulting to the active one."""
    conf = get_urlconf() if urlconf is None else urlconf
    return get_resolver(conf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
    """Module-level reverse: resolve namespaces, then delegate to the resolver.

    *viewname* may be "ns1:ns2:name", a dotted path, or a callable.
    Raises NoReverseMatch when a namespace segment is unknown.
    """
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}

    if prefix is None:
        prefix = get_script_prefix()

    if not isinstance(viewname, basestring):
        view = viewname
    else:
        # Split "ns1:ns2:view" and walk the namespace chain outermost-first
        # (the reversed list is consumed with pop() from the end).
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]

        resolved_path = []
        while path:
            ns = path.pop()

            # Lookup the name to see if it could be an app identifier
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver
                if current_app and current_app in app_list:
                    # If we are reversing for a particular app, use that namespace
                    ns = current_app
                elif ns not in app_list:
                    # The name isn't shared by one of the instances (i.e., the default)
                    # so just pick the first instance as the default.
                    ns = app_list[0]
            except KeyError:
                pass

            try:
                # Descend into the namespaced sub-resolver, extending the
                # URL prefix with the namespace's own pattern.
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                prefix = prefix + extra
            except KeyError, key:
                if resolved_path:
                    raise NoReverseMatch("%s is not a registered namespace inside '%s'" % (key, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" % key)

    return iri_to_uri(u'%s%s' % (prefix, resolver.reverse(view,
            *args, **kwargs)))
def clear_url_caches():
    """Flush the module-level resolver and view-callable caches."""
    global _resolver_cache, _callable_cache
    for cache in (_resolver_cache, _callable_cache):
        cache.clear()
def set_script_prefix(prefix):
    """
    Sets the script prefix for the current thread.

    A trailing slash is appended when missing so the stored prefix is
    always directory-like.
    """
    _prefixes.value = prefix if prefix.endswith('/') else prefix + '/'
def get_script_prefix():
    """
    Returns the currently active script prefix. Useful for client code that
    wishes to construct their own URLs manually (although accessing the request
    instance is normally going to be a lot cleaner).
    """
    try:
        return _prefixes.value
    except AttributeError:
        # No prefix has been set for this thread yet; fall back to root.
        return u'/'
def set_urlconf(urlconf_name):
    """
    Sets the URLconf for the current thread (overriding the default one in
    settings). Set to None to revert back to the default.
    """
    if not urlconf_name:
        # Falsy value means "revert to the default": drop any override.
        if hasattr(_urlconfs, "value"):
            del _urlconfs.value
    else:
        _urlconfs.value = urlconf_name
def get_urlconf(default=None):
    """
    Returns the root URLconf to use for the current thread if it has been
    changed from the default one.
    """
    try:
        return _urlconfs.value
    except AttributeError:
        return default
| bsd-3-clause |
imruahmed/microblog | flask/lib/python2.7/site-packages/migrate/tests/versioning/test_genmodel.py | 78 | 8708 | # -*- coding: utf-8 -*-
import os
import six
import sqlalchemy
from sqlalchemy import *
from migrate.versioning import genmodel, schemadiff
from migrate.changeset import schema
from migrate.tests import fixture
class TestSchemaDiff(fixture.DB):
    """End-to-end tests for diffing a model MetaData against a live
    database and for the Python migration code generated from the diff."""

    table_name = 'tmp_schemadiff'
    level = fixture.DB.CONNECT

    def _setup(self, url):
        """Connect and reset the test database to a known-empty state,
        then define the initial model table."""
        super(TestSchemaDiff, self)._setup(url)
        self.meta = MetaData(self.engine)
        self.meta.reflect()
        self.meta.drop_all()  # in case junk tables are lying around in the test database
        self.meta = MetaData(self.engine)
        self.meta.reflect()  # needed if we just deleted some tables
        self.table = Table(self.table_name, self.meta,
            Column('id',Integer(), primary_key=True),
            Column('name', UnicodeText()),
            Column('data', UnicodeText()),
        )

    def _teardown(self):
        # Drop everything we created so the next test starts clean.
        if self.table.exists():
            self.meta = MetaData(self.engine)
            self.meta.reflect()
            self.meta.drop_all()
        super(TestSchemaDiff, self)._teardown()

    def _applyLatestModel(self):
        # Push the current model (self.meta) into the database schema.
        diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
        genmodel.ModelGenerator(diff,self.engine).runB2A()

    # NOTE(mriedem): DB2 handles UnicodeText as LONG VARGRAPHIC
    # so the schema diffs on the columns don't work with this test.
    @fixture.usedb(not_supported='ibm_db_sa')
    def test_functional(self):
        """Walk the model through create/alter cycles, checking the diff
        summary and the generated upgrade/downgrade code at each step."""

        def assertDiff(isDiff, tablesMissingInDatabase, tablesMissingInModel, tablesWithDiff):
            # Recompute the model-vs-database diff and compare its summary
            # tuple against the expected one.
            diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
            self.assertEqual(
                (diff.tables_missing_from_B,
                 diff.tables_missing_from_A,
                 list(diff.tables_different.keys()),
                 bool(diff)),
                (tablesMissingInDatabase,
                 tablesMissingInModel,
                 tablesWithDiff,
                 isDiff)
            )

        # Model is defined but database is empty.
        assertDiff(True, [self.table_name], [], [])

        # Check Python upgrade and downgrade of database from updated model.
        diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
        decls, upgradeCommands, downgradeCommands = genmodel.ModelGenerator(diff,self.engine).genB2AMigration()

        # Feature test for a recent SQLa feature;
        # expect different output in that case.
        if repr(String()) == 'String()':
            self.assertEqualIgnoreWhitespace(decls, '''
            from migrate.changeset import schema
            pre_meta = MetaData()
            post_meta = MetaData()
            tmp_schemadiff = Table('tmp_schemadiff', post_meta,
                Column('id', Integer, primary_key=True, nullable=False),
                Column('name', UnicodeText),
                Column('data', UnicodeText),
            )
            ''')
        else:
            self.assertEqualIgnoreWhitespace(decls, '''
            from migrate.changeset import schema
            pre_meta = MetaData()
            post_meta = MetaData()
            tmp_schemadiff = Table('tmp_schemadiff', post_meta,
                Column('id', Integer, primary_key=True, nullable=False),
                Column('name', UnicodeText(length=None)),
                Column('data', UnicodeText(length=None)),
            )
            ''')

        # Create table in database, now model should match database.
        self._applyLatestModel()
        assertDiff(False, [], [], [])

        # Check Python code gen from database.
        diff = schemadiff.getDiffOfModelAgainstDatabase(MetaData(), self.engine, excludeTables=['migrate_version'])
        src = genmodel.ModelGenerator(diff,self.engine).genBDefinition()

        # Execute the generated module source and compare the reflected
        # columns with the generated ones.
        namespace = {}
        six.exec_(src, namespace)

        c1 = Table('tmp_schemadiff', self.meta, autoload=True).c
        c2 = namespace['tmp_schemadiff'].c
        self.compare_columns_equal(c1, c2, ['type'])
        # TODO: get rid of ignoring type

        if not self.engine.name == 'oracle':
            # Add data, later we'll make sure it's still present.
            result = self.engine.execute(self.table.insert(), id=1, name=u'mydata')
            dataId = result.inserted_primary_key[0]

        # Modify table in model (by removing it and adding it back to model)
        # Drop column data, add columns data2 and data3.
        self.meta.remove(self.table)
        self.table = Table(self.table_name,self.meta,
            Column('id',Integer(),primary_key=True),
            Column('name',UnicodeText(length=None)),
            Column('data2',Integer(),nullable=True),
            Column('data3',Integer(),nullable=True),
        )
        assertDiff(True, [], [], [self.table_name])

        # Apply latest model changes and find no more diffs.
        self._applyLatestModel()
        assertDiff(False, [], [], [])

        # Drop column data3, add data4
        self.meta.remove(self.table)
        self.table = Table(self.table_name,self.meta,
            Column('id',Integer(),primary_key=True),
            Column('name',UnicodeText(length=None)),
            Column('data2',Integer(),nullable=True),
            Column('data4',Float(),nullable=True),
        )
        assertDiff(True, [], [], [self.table_name])

        diff = schemadiff.getDiffOfModelAgainstDatabase(
            self.meta, self.engine, excludeTables=['migrate_version'])
        decls, upgradeCommands, downgradeCommands = genmodel.ModelGenerator(diff,self.engine).genB2AMigration(indent='')

        # decls have changed since genBDefinition
        six.exec_(decls, namespace)
        # migration commands expect a namespace containing migrate_engine
        namespace['migrate_engine'] = self.engine
        # run the migration up and down
        six.exec_(upgradeCommands, namespace)
        assertDiff(False, [], [], [])

        six.exec_(decls, namespace)
        six.exec_(downgradeCommands, namespace)
        assertDiff(True, [], [], [self.table_name])

        six.exec_(decls, namespace)
        six.exec_(upgradeCommands, namespace)
        assertDiff(False, [], [], [])

        if not self.engine.name == 'oracle':
            # Make sure data is still present.
            result = self.engine.execute(self.table.select(self.table.c.id==dataId))
            rows = result.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0].name, 'mydata')

            # Add data, later we'll make sure it's still present.
            result = self.engine.execute(self.table.insert(), id=2, name=u'mydata2', data2=123)
            dataId2 = result.inserted_primary_key[0]

        # Change column type in model.
        self.meta.remove(self.table)
        self.table = Table(self.table_name,self.meta,
            Column('id',Integer(),primary_key=True),
            Column('name',UnicodeText(length=None)),
            Column('data2',String(255),nullable=True),
        )

        # XXX test type diff
        # NOTE(review): everything below this `return` is intentionally
        # disabled dead code kept for a future type-diff test.
        return

        assertDiff(True, [], [], [self.table_name])

        # Apply latest model changes and find no more diffs.
        self._applyLatestModel()
        assertDiff(False, [], [], [])

        if not self.engine.name == 'oracle':
            # Make sure data is still present.
            result = self.engine.execute(self.table.select(self.table.c.id==dataId2))
            rows = result.fetchall()
            self.assertEqual(len(rows), 1)
            self.assertEqual(rows[0].name, 'mydata2')
            self.assertEqual(rows[0].data2, '123')

        # Delete data, since we're about to make a required column.
        # Not even using sqlalchemy.PassiveDefault helps because we're doing explicit column select.
        self.engine.execute(self.table.delete(), id=dataId)

        if not self.engine.name == 'firebird':
            # Change column nullable in model.
            self.meta.remove(self.table)
            self.table = Table(self.table_name,self.meta,
                Column('id',Integer(),primary_key=True),
                Column('name',UnicodeText(length=None)),
                Column('data2',String(255),nullable=False),
            )
            assertDiff(True, [], [], [self.table_name])  # TODO test nullable diff

            # Apply latest model changes and find no more diffs.
            self._applyLatestModel()
            assertDiff(False, [], [], [])

        # Remove table from model.
        self.meta.remove(self.table)
        assertDiff(True, [], [self.table_name], [])
| bsd-3-clause |
fhaoquan/kbengine | kbe/res/scripts/common/Lib/email/mime/multipart.py | 480 | 1573 | # Copyright (C) 2002-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Base class for MIME multipart/* type messages."""
__all__ = ['MIMEMultipart']
from email.mime.base import MIMEBase
class MIMEMultipart(MIMEBase):
    """Base class for MIME multipart/* type messages."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
                 **_params):
        """Create a multipart/<_subtype> message (multipart/mixed by
        default) with proper Content-Type and MIME-Version headers.

        boundary -- explicit multipart boundary string; when omitted it
            is calculated lazily, as needed.
        _subparts -- optional iterable of initial subparts, attached in
            order via attach(); more can always be attached later.
        _params -- additional parameters for the Content-Type header.
        """
        super().__init__('multipart', _subtype, **_params)

        # Message.is_multipart assumes a multipart payload is a list,
        # so start from an empty list rather than None.
        self._payload = []

        for part in (_subparts or ()):
            self.attach(part)
        if boundary:
            self.set_boundary(boundary)
| lgpl-3.0 |
smblance/ggplot | ggplot/tests/test_legend.py | 12 | 5181 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
from ggplot.utils.exceptions import GgplotError
def get_test_df():
    """Build the small three-row fixture frame used by the legend tests."""
    columns = {
        'xmin': [1, 3, 5],
        'xmax': [2, 3.5, 7],
        'ymin': [1, 4, 6],
        'ymax': [5, 5, 9],
        'fill': ['blue', 'red', 'green'],
        'quality': ['good', 'bad', 'ugly'],
        'alpha': [0.1, 0.5, 0.9],
        'texture': ['hard', 'soft', 'medium'],
    }
    return pd.DataFrame(columns)
def test_legend_structure():
    """The legend dict built by assign_visual_mapping must mirror the
    aesthetic mapping: one entry per mapped aesthetic, remembering the
    source column and choosing a discrete or continuous scale type."""
    df = get_test_df()
    gg = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                        colour='quality', fill='fill', alpha='alpha',
                        linetype='texture'))
    new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)

    # All mapped aesthetics must have an entry in the legend
    for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
        assert(aesthetic in legend)

    # None of the unassigned aesthetic should have an entry in the legend
    assert('size' not in legend)
    assert('shape' not in legend)

    # legend entries should remember the column names
    # to which they were mapped
    assert(legend['fill']['column_name'] == 'fill')
    assert(legend['color']['column_name'] == 'quality')
    assert(legend['linetype']['column_name'] == 'texture')
    assert(legend['alpha']['column_name'] == 'alpha')

    # Discrete columns for non-numeric data
    assert(legend['fill']['scale_type'] == 'discrete')
    assert(legend['color']['scale_type'] == 'discrete')
    assert(legend['linetype']['scale_type'] == 'discrete')
    assert(legend['alpha']['scale_type'] == 'discrete')

    # Alternate
    # Even numeric data stays discrete while there are few unique values.
    df2 = pd.DataFrame.copy(df)
    df2['fill'] = [90, 3.2, 8.1]
    gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                         colour='quality', fill='fill', alpha='alpha',
                         linetype='texture'))
    new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
    assert(legend['fill']['scale_type'] == 'discrete')

    # Test if legend switches to continuous for more than 8 numerical values
    df3 = pd.DataFrame({
        'xmin': [1, 3, 5, 8, 2, 1, 4, 7, 9],
        'xmax': [2, 3.5, 7, 12, 3, 2, 6, 8, 10],
        'ymin': [1, 4, 6, 0, 0, 0, 0, 0, 0],
        'ymax': [5, 5, 9, 1, 1, 1, 1, 1, 1],
        'fill': ['blue', 'red', 'green', 'green', 'green',
                 'green', 'green', 'green', 'brown'],
        'quality': ['good', 'bad', 'ugly', 'horrible', 'quite awful',
                    'impertinent', 'jolly', 'hazardous', 'ok'],
        'alpha': [0.1, 0.2, 0.4, 0.5, 0.6, 0.65, 0.8, 0.82, 0.83],
        'texture': ['hard', 'soft', 'medium', 'fluffy', 'slimy', 'rough',
                    'edgy', 'corny', 'slanted']
    })
    gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                         colour='quality', fill='fill', alpha='alpha',
                         linetype='texture'))
    new_df, legend = assign_visual_mapping(df3, gg.aesthetics, gg)
    assert(legend['alpha']['scale_type'] == 'continuous')

    # Test if legend raises GgplotError when size and alpha is fed non numeric data
    gg = ggplot(df3, aes(size="fill"))
    assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
    gg = ggplot(df3, aes(alpha="fill"))
    assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
@cleanup
def test_alpha_rect():
    # Rectangles with mapped colour/fill/alpha/linetype must render a
    # matching legend (checked against the stored baseline image).
    df = get_test_df()
    p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
                       colour='quality', fill='fill', alpha='alpha',
                       linetype='texture'))
    p += geom_rect(size=5)
    assert_same_ggplot(p, "legend_alpha_rect")
@cleanup
def test_alpha():
    # Colour, size and alpha all mapped from the same numeric column
    # (clarity name length), subsampled for speed.
    diamonds["test"] = diamonds["clarity"].map(len)
    p = ggplot(diamonds[::50], aes(x='carat', y='price', colour='test',
                                   size='test', alpha='test'))
    #p = ggplot(diamonds[1:60000:50], aes(x='carat', y='price', shape='clarity'))
    p = p + geom_point() + ggtitle("Diamonds: A Plot")
    p = p + xlab("Carat") + ylab("Price")
    assert_same_ggplot(p, "legend_alpha")
@cleanup
def test_linetype():
    # One legend entry per melted variable, shared across the colour,
    # linetype and shape aesthetics.
    meat_lng = pd.melt(meat[['date', 'beef', 'pork', 'broilers']], id_vars='date')
    p = ggplot(aes(x='date', y='value', colour='variable',
                   linetype='variable', shape='variable'), data=meat_lng) + \
        geom_line() + geom_point() +\
        ylim(0, 3000)
    assert_same_ggplot(p, "legend_linetype")
@cleanup
def test_shape_alpha():
    # Mixing a discrete shape mapping with continuous colour/size/alpha.
    diamonds["test"] = diamonds["clarity"].map(len)
    df = diamonds[::50]
    p = ggplot(df, aes(x='carat', y='price', colour='test', size='test',
                       alpha='test', shape='clarity')) + geom_point()
    assert_same_ggplot(p, "legend_shape_alpha")
| bsd-2-clause |
glenn124f/treeherder | treeherder/model/utils.py | 2 | 1450 | import logging
import random
import time
from _mysql_exceptions import OperationalError
logger = logging.getLogger(__name__)
def get_now_timestamp():
    """
    Return a unix timestamp for the current time.

    This is useful because it can be mocked out in unit tests.
    """
    seconds_since_epoch = time.time()
    return int(seconds_since_epoch)
def retry_execute(dhub, logger, retries=0, **kwargs):
    """Retry the query in the case of an OperationalError."""
    # Retries up to 20 times with a short random backoff to ride out
    # transient MySQL failures; any other exception propagates
    # immediately, as does the OperationalError once retries run out.
    try:
        return dhub.execute(**kwargs)
    except OperationalError as e:
        if retries < 20:
            retries += 1
            sleep_time = round(random.random() * .05, 3)  # 0 to 50ms
            if logger:
                logger.info(
                    "MySQL operational error `{}` hit. Retry #{} in {}s: {}".format(
                        str(e), retries, sleep_time, kwargs
                    ))
            time.sleep(sleep_time)
            # Recurse with the incremented counter so the cap is honoured.
            return retry_execute(dhub, logger, retries, **kwargs)
        raise
def orm_delete(model, queryset, chunk_size, sleep_time):
    """Delete the rows of *queryset* from *model* in chunks.

    Returns the list of deleted ids. Sleeping *sleep_time* seconds
    between chunks keeps the database responsive for other queries.
    """
    logger.debug("Deleting from %r" % model)

    # Materialise the ids up front so the delete batches are stable even
    # while rows are being removed.
    delete_ids = [item['id'] for item in queryset.values('id')]

    for lower_bound in xrange(0, len(delete_ids), chunk_size):
        model.objects.filter(
            id__in=delete_ids[lower_bound:lower_bound+chunk_size]).delete()

        if sleep_time:
            # Allow some time for other queries to get through
            time.sleep(sleep_time)

    return delete_ids
| mpl-2.0 |
minhphung171093/GreenERP_V9 | openerp/addons/hw_escpos/controllers/main.py | 4 | 13733 | # -*- coding: utf-8 -*-
import commands
import logging
import json
import os
import os.path
import io
import base64
import openerp
import time
import random
import math
import md5
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import pickle
import re
import subprocess
import traceback
try:
from .. escpos import *
from .. escpos.exceptions import *
from .. escpos.printer import Usb
except ImportError:
escpos = printer = None
from threading import Thread, Lock
from Queue import Queue, Empty
try:
import usb.core
except ImportError:
usb = None
from PIL import Image
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# workaround https://bugs.launchpad.net/openobject-server/+bug/947231
# related to http://bugs.python.org/issue7980
from datetime import datetime
datetime.strptime('2012-01-01', '%Y-%m-%d')
class EscposDriver(Thread):
    """Background thread driving an ESC/POS USB receipt printer.

    Print jobs are pushed onto a queue and executed asynchronously; the
    thread (re)connects to the printer as needed and maintains a status
    dict displayed on the PosBox status page.
    """

    def __init__(self):
        Thread.__init__(self)
        self.queue = Queue()
        self.lock = Lock()
        self.status = {'status':'connecting', 'messages':[]}

    def connected_usb_devices(self):
        """Return a list of dicts (vendor/product/name) for attached printers."""
        connected = []

        # printers can either define bDeviceClass=7, or they can define one of
        # their interfaces with bInterfaceClass=7. This class checks for both.
        class FindUsbClass(object):
            def __init__(self, usb_class):
                self._class = usb_class
            def __call__(self, device):
                # first, let's check the device
                if device.bDeviceClass == self._class:
                    return True
                # transverse all devices and look through their interfaces to
                # find a matching class
                for cfg in device:
                    intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
                    if intf is not None:
                        return True
                return False

        printers = usb.core.find(find_all=True, custom_match=FindUsbClass(7))

        # Currently we ask customers to put the STAR TSP650II into
        # 'USB Mode' Vendor class instead of Printer class. When set
        # to Printer class it doesn't show up under Linux at
        # all. Vendor class does work, but that means that it's not
        # going to have an interfaceClass 7.
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x0519)

        for printer in printers:
            connected.append({
                'vendor': printer.idVendor,
                'product': printer.idProduct,
                'name': usb.util.get_string(printer, 256, printer.iManufacturer) + " " + usb.util.get_string(printer, 256, printer.iProduct)
            })
        return connected

    def lockedstart(self):
        # Start the worker thread exactly once; guarded by the lock so
        # concurrent push_task() calls cannot start it twice.
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def get_escpos_printer(self):
        """Open the first detected printer, updating the status banner."""
        printers = self.connected_usb_devices()
        if len(printers) > 0:
            self.set_status('connected','Connected to '+printers[0]['name'])
            return Usb(printers[0]['vendor'], printers[0]['product'])
        else:
            self.set_status('disconnected','Printer Not Found')
            return None

    def get_status(self):
        # Queue a no-op 'status' task so the connection state refreshes,
        # then report the current (possibly stale) status dict.
        self.push_task('status')
        return self.status

    def open_cashbox(self,printer):
        # Pulse both standard cash drawer pins.
        printer.cashdraw(2)
        printer.cashdraw(5)

    def set_status(self, status, message = None):
        """Record the driver status, de-duplicating repeated messages."""
        _logger.info(status+' : '+ (message or 'no message'))
        if status == self.status['status']:
            # Same state: append the message unless it repeats the last one.
            if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
                self.status['messages'].append(message)
        else:
            # State change: reset the message list.
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []

        if status == 'error' and message:
            _logger.error('ESC/POS Error: '+message)
        elif status == 'disconnected' and message:
            _logger.warning('ESC/POS Device Disconnected: '+message)

    def run(self):
        """Worker loop: pop queued tasks and execute them on the printer.

        Tasks that fail are re-queued. Stale jobs are dropped: receipts
        older than one hour, cashbox pulses older than 12 seconds.
        """
        printer = None
        if not escpos:
            _logger.error('ESC/POS cannot initialize, please verify system dependencies.')
            return
        while True:
            try:
                error = True
                timestamp, task, data = self.queue.get(True)
                printer = self.get_escpos_printer()
                if printer == None:
                    # No printer: re-queue real work and retry in 5s.
                    if task != 'status':
                        self.queue.put((timestamp,task,data))
                    error = False
                    time.sleep(5)
                    continue
                elif task == 'receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        self.print_receipt_body(printer,data)
                        printer.cut()
                elif task == 'xml_receipt':
                    if timestamp >= time.time() - 1 * 60 * 60:
                        printer.receipt(data)
                elif task == 'cashbox':
                    if timestamp >= time.time() - 12:
                        self.open_cashbox(printer)
                elif task == 'printstatus':
                    self.print_status(printer)
                elif task == 'status':
                    pass
                error = False

            except NoDeviceError as e:
                print "No device found %s" %str(e)
            except HandleDeviceError as e:
                print "Impossible to handle the device due to previous error %s" % str(e)
            except TicketNotPrinted as e:
                print "The ticket does not seems to have been fully printed %s" % str(e)
            except NoStatusError as e:
                print "Impossible to get the status of the printer %s" % str(e)
            except Exception as e:
                self.set_status('error', str(e))
                errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
                _logger.error(errmsg);
            finally:
                # Put failed tasks back for a retry, and always release
                # the USB device so the next job can reopen it.
                if error:
                    self.queue.put((timestamp, task, data))
                if printer:
                    printer.close()

    def push_task(self,task, data = None):
        # Lazily start the worker and enqueue a timestamped task.
        self.lockedstart()
        self.queue.put((time.time(),task,data))

    def print_status(self,eprint):
        """Print the PosBox network status ticket (IP addresses, homepage)."""
        localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
        # Parse non-local IPv4 addresses out of ifconfig's output.
        ips = [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
        ips = [ ip for ip in ips if ip not in localips ]
        eprint.text('\n\n')
        eprint.set(align='center',type='b',height=2,width=2)
        eprint.text('PosBox Status\n')
        eprint.text('\n')
        eprint.set(align='center')

        if len(ips) == 0:
            eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
        elif len(ips) == 1:
            eprint.text('IP Address:\n'+ips[0]+'\n')
        else:
            eprint.text('IP Addresses:\n')
            for ip in ips:
                eprint.text(ip+'\n')

        if len(ips) >= 1:
            eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')

        eprint.text('\n\n')
        eprint.cut()

    def print_receipt_body(self,eprint,receipt):
        """Render a receipt dict (header, orderlines, totals, payments,
        footer) to the printer *eprint*."""

        def check(string):
            # Truthy only for a non-empty, non-blank string (and not True).
            return string != True and bool(string) and string.strip()

        def price(amount):
            # Format at the receipt's configured price precision.
            return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)

        def money(amount):
            return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)

        def quantity(amount):
            # Whole quantities are printed without decimals.
            if math.floor(amount) != amount:
                return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
            else:
                return str(amount)

        def printline(left, right='', width=40, ratio=0.5, indent=0):
            # Lay out one line: left text padded/truncated, right text
            # right-aligned, split at width*ratio.
            lwidth = int(width * ratio)
            rwidth = width - lwidth
            lwidth = lwidth - indent

            left = left[:lwidth]
            if len(left) != lwidth:
                left = left + ' ' * (lwidth - len(left))

            right = right[-rwidth:]
            if len(right) != rwidth:
                right = ' ' * (rwidth - len(right)) + right

            return ' ' * indent + left + right + '\n'

        def print_taxes():
            taxes = receipt['tax_details']
            for tax in taxes:
                eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))

        # Receipt Header
        if receipt['company']['logo']:
            eprint.set(align='center')
            eprint.print_base64_image(receipt['company']['logo'])
            eprint.text('\n')
        else:
            eprint.set(align='center',type='b',height=2,width=2)
            eprint.text(receipt['company']['name'] + '\n')

        eprint.set(align='center',type='b')
        if check(receipt['company']['contact_address']):
            eprint.text(receipt['company']['contact_address'] + '\n')
        if check(receipt['company']['phone']):
            eprint.text('Tel:' + receipt['company']['phone'] + '\n')
        if check(receipt['company']['vat']):
            eprint.text('VAT:' + receipt['company']['vat'] + '\n')
        if check(receipt['company']['email']):
            eprint.text(receipt['company']['email'] + '\n')
        if check(receipt['company']['website']):
            eprint.text(receipt['company']['website'] + '\n')
        if check(receipt['header']):
            eprint.text(receipt['header']+'\n')
        if check(receipt['cashier']):
            eprint.text('-'*32+'\n')
            eprint.text('Served by '+receipt['cashier']+'\n')

        # Orderlines
        eprint.text('\n\n')
        eprint.set(align='center')
        for line in receipt['orderlines']:
            pricestr = price(line['price_display'])
            if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
                # Simple case: product name and price on a single line.
                eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
            else:
                # Detailed case: name first, then discount/quantity lines.
                eprint.text(printline(line['product_name'],ratio=0.6))
                if line['discount'] != 0:
                    eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
                if line['unit_name'] == 'Unit(s)':
                    eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
                else:
                    eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))

        # Subtotal if the taxes are not included
        taxincluded = True
        if money(receipt['subtotal']) != money(receipt['total_with_tax']):
            eprint.text(printline('','-------'));
            eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
            taxincluded = False

        # Total
        eprint.text(printline('','-------'));
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
        eprint.text('\n\n');

        # Paymentlines
        eprint.set(align='center')
        for line in receipt['paymentlines']:
            eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))

        eprint.text('\n');
        eprint.set(align='center',height=2)
        eprint.text(printline(_(' CHANGE'),money(receipt['change']),width=40, ratio=0.6))
        eprint.set(align='center')
        eprint.text('\n');

        # Extra Payment info
        if receipt['total_discount'] != 0:
            eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
        if taxincluded:
            print_taxes()
            #eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))

        # Footer
        if check(receipt['footer']):
            eprint.text('\n'+receipt['footer']+'\n\n')
        eprint.text(receipt['name']+'\n')
        # Date fields arrive JS-style: month is zero-based, hence the +1.
        eprint.text( str(receipt['date']['date']).zfill(2)
                    +'/'+ str(receipt['date']['month']+1).zfill(2)
                    +'/'+ str(receipt['date']['year']).zfill(4)
                    +' '+ str(receipt['date']['hour']).zfill(2)
                    +':'+ str(receipt['date']['minute']).zfill(2) )
# Single module-level driver instance shared by all HTTP routes; a status
# ticket is queued at import so the printer announces itself on startup,
# and the driver is registered with the hw_proxy status page.
driver = EscposDriver()

driver.push_task('printstatus')

hw_proxy.drivers['escpos'] = driver
class EscposProxy(hw_proxy.Proxy):
    """JSON-RPC endpoints exposed to the Point of Sale web client.

    Each route is fire-and-forget: it only enqueues a task for the
    driver thread and returns immediately.
    """

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        _logger.info('ESC/POS: OPEN CASHBOX')
        driver.push_task('cashbox')

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        _logger.info('ESC/POS: PRINT RECEIPT')
        driver.push_task('receipt',receipt)

    @http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
    def print_xml_receipt(self, receipt):
        _logger.info('ESC/POS: PRINT XML RECEIPT')
        driver.push_task('xml_receipt',receipt)
| gpl-3.0 |
mbuhot/mbuhot-euler-solutions | python/problem-081.py | 1 | 1253 | #! /usr/bin/env python3
from memo import memoize
import sys
# Problem statement kept verbatim from projecteuler.net (problem 81).
description = '''
Path sum: two ways
Problem 81
In the 5 by 5 matrix below, the minimal path sum from the top left to the bottom right, by only moving to the right and down, is indicated in bold red and is equal to 2427.
131 673 234 103 18
201 96 342 965 150
630 803 746 422 111
537 699 497 121 956
805 732 524 37 331
Find the minimal path sum, in matrix.txt (right click and 'Save Link/Target As...'), a 31K text file containing a 80 by 80 matrix, from the top left to the bottom right by only moving right and down.
'''

# Allow deep recursion: the solver may recurse roughly once per matrix cell
# (up to ~6400 frames for the 80x80 puzzle input).
sys.setrecursionlimit(10000)

# Parse the comma-separated 80x80 grid into a list of rows of ints.
with open('matrix.txt', 'r') as f:
    matrix = [[int(x) for x in line.strip().split(',')] for line in f]
def minPathFromStart(matrix):
    """Return the minimal path sum from the top-left to the bottom-right of
    *matrix*, moving only right or down.

    Rewritten as an iterative bottom-up dynamic programme: it produces the
    same result as the original memoized recursion but needs neither the
    external ``memoize`` helper nor a raised recursion limit, and runs in
    O(rows * cols) time with O(cols) extra space.

    :param matrix: non-empty rectangular list of lists of numbers.
    :returns: the minimal path sum.
    """
    rows = len(matrix)
    cols = len(matrix[0])
    # dp[j] holds the minimal path sum from cell (i, j) to the bottom-right
    # corner for the row currently being processed (sweeping upwards).
    dp = [0] * cols
    for i in range(rows - 1, -1, -1):
        for j in range(cols - 1, -1, -1):
            if i == rows - 1 and j == cols - 1:
                dp[j] = matrix[i][j]                            # goal cell
            elif i == rows - 1:
                dp[j] = matrix[i][j] + dp[j + 1]                # can only go right
            elif j == cols - 1:
                dp[j] = matrix[i][j] + dp[j]                    # can only go down
            else:
                dp[j] = matrix[i][j] + min(dp[j], dp[j + 1])    # best of down/right
    return dp[0]
# 5x5 example from the problem statement; its minimal path sum is 2427.
test = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331]]

print(minPathFromStart(test))    # expected: 2427
print(minPathFromStart(matrix))  # answer for the 80x80 puzzle input
| mit |
eBrnd/i3pystatus | i3pystatus/weather/wunderground.py | 2 | 8899 | from i3pystatus import IntervalModule
from i3pystatus.core.util import internet, require
from datetime import datetime
from urllib.request import urlopen
import json
import re
# API endpoint templates. Placeholders are, respectively:
# (api_key, extra_path_options, location_code) and (api_key, feature, station_id).
GEOLOOKUP_URL = 'http://api.wunderground.com/api/%s/geolookup%s/q/%s.json'
STATION_QUERY_URL = 'http://api.wunderground.com/api/%s/%s/q/%s.json'
class Wunderground(IntervalModule):
    '''
    This module retrieves weather data using the Weather Underground API.

    .. note::
        A Weather Underground API key is required to use this module, you can
        sign up for a developer API key free at
        https://www.wunderground.com/weather/api/

        A developer API key is allowed 500 queries per day, and no more than 10
        in a given minute. Therefore, it is recommended to be conservative when
        setting the update interval.

    Valid values for ``location_code`` include:

    * **State/City_Name** - CA/San_Francisco
    * **Country/City** - France/Paris
    * **Geolocation by IP** - autoip
    * **Zip or Postal Code** - 60616
    * **ICAO Airport Code** - icao:LAX
    * **Latitude/Longitude** - 41.8301943,-87.6342619
    * **Personal Weather Station (PWS)** - pws:KILCHICA30

    When not using a ``pws`` or ``icao`` station ID, the location will be
    queried, and the closest station will be used. For a list of PWS
    station IDs, visit the following URL:

    http://www.wunderground.com/weatherstation/ListStations.asp

    .. _weather-usage-wunderground:

    .. rubric:: Usage example

    .. code-block:: python

        from i3pystatus import Status
        from i3pystatus.weather import wunderground

        status = Status()

        status.register(
            'weather',
            format='{condition} {current_temp}{temp_unit}{icon}[ Hi: {high_temp}] Lo: {low_temp}',
            colorize=True,
            backend=wunderground.Wunderground(
                api_key='dbafe887d56ba4ad',
                location_code='pws:MAT645',
                units='imperial',
            ),
        )

        status.run()

    See :ref:`here <weather-formatters>` for a list of formatters which can be
    used.
    '''
    interval = 300

    settings = (
        ('api_key', 'Weather Underground API key'),
        ('location_code', 'Location code from wunderground.com'),
        ('units', '\'metric\' or \'imperial\''),
        ('use_pws', 'Set to False to use only airport stations'),
        ('forecast', 'Set to ``True`` to check forecast (generates one '
                     'additional API request per weather update). If set to '
                     '``False``, then the ``low_temp`` and ``high_temp`` '
                     'formatters will be set to empty strings.'),
    )
    required = ('api_key', 'location_code')

    api_key = None
    location_code = None
    units = 'metric'
    use_pws = True
    forecast = False

    # These will be set once weather data has been checked
    station_id = None
    forecast_url = None

    @require(internet)
    def api_request(self, url):
        '''
        Execute an HTTP POST to the specified URL and return the content
        '''
        with urlopen(url) as content:
            try:
                # Honor the charset advertised in the Content-Type header.
                content_type = dict(content.getheaders())['Content-Type']
                charset = re.search(r'charset=(.*)', content_type).group(1)
            except AttributeError:
                # No charset parameter in the header; assume UTF-8.
                charset = 'utf-8'
            response = json.loads(content.read().decode(charset))
        try:
            # API failures are reported inside the JSON body; surface them as
            # an exception. A KeyError here means there was no error entry.
            raise Exception(response['response']['error']['description'])
        except KeyError:
            pass
        return response

    @require(internet)
    def geolookup(self):
        '''
        Use the location_code to perform a geolookup and find the closest
        station. If the location is a pws or icao station ID, no lookup will be
        performed.
        '''
        if self.station_id is None:
            try:
                # 'pws:XXXX' / 'icao:XXXX' codes already name a station
                # directly; skip the geolookup in that case.
                for no_lookup in ('pws', 'icao'):
                    sid = self.location_code.partition(no_lookup + ':')[-1]
                    if sid:
                        self.station_id = self.location_code
                        return
            except AttributeError:
                # Numeric or some other type, either way we'll just stringify
                # it below and perform a lookup.
                pass

            extra_opts = '/pws:0' if not self.use_pws else ''
            api_url = GEOLOOKUP_URL % (self.api_key,
                                       extra_opts,
                                       self.location_code)
            response = self.api_request(api_url)
            station_type = 'pws' if self.use_pws else 'airport'
            try:
                stations = response['location']['nearby_weather_stations']
                nearest = stations[station_type]['station'][0]
            except (KeyError, IndexError):
                raise Exception('No locations matched location_code %s'
                                % self.location_code)

            if self.use_pws:
                nearest_pws = nearest.get('id', '')
                if not nearest_pws:
                    raise Exception('No id entry for station')
                self.station_id = 'pws:%s' % nearest_pws
            else:
                nearest_airport = nearest.get('icao', '')
                if not nearest_airport:
                    raise Exception('No icao entry for station')
                self.station_id = 'icao:%s' % nearest_airport

    @require(internet)
    def get_forecast(self):
        '''
        If configured to do so, make an API request to retrieve the forecast
        data for the configured/queried weather station, and return the low and
        high temperatures. Otherwise, return two empty strings.
        '''
        if self.forecast:
            query_url = STATION_QUERY_URL % (self.api_key,
                                             'forecast',
                                             self.station_id)
            try:
                response = self.api_request(query_url)['forecast']
                response = response['simpleforecast']['forecastday'][0]
            except (KeyError, IndexError, TypeError):
                raise Exception('No forecast data found for %s' % self.station_id)
            unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
            low_temp = response.get('low', {}).get(unit, '')
            high_temp = response.get('high', {}).get(unit, '')
            return low_temp, high_temp
        else:
            return '', ''

    @require(internet)
    def weather_data(self):
        '''
        Query the configured/queried station and return the weather data
        '''
        # If necessary, do a geolookup to set the station_id
        self.geolookup()
        query_url = STATION_QUERY_URL % (self.api_key,
                                         'conditions',
                                         self.station_id)
        try:
            response = self.api_request(query_url)['current_observation']
            self.forecast_url = response.pop('ob_url', None)
        except KeyError:
            raise Exception('No weather data found for %s' % self.station_id)

        low_temp, high_temp = self.get_forecast()

        if self.units == 'metric':
            temp_unit = 'c'
            speed_unit = 'kph'
            distance_unit = 'km'
            pressure_unit = 'mb'
        else:
            temp_unit = 'f'
            speed_unit = 'mph'
            distance_unit = 'mi'
            pressure_unit = 'in'

        def _find(key, data=None):
            # NOTE: falls back to the full response whenever ``data`` is
            # falsy, so an empty dict also means "search the whole response".
            data = data or response
            return data.get(key, 'N/A')

        try:
            observation_time = int(_find('observation_epoch'))
        # BUGFIX: when 'observation_epoch' is absent, _find returns the
        # string 'N/A' and int('N/A') raises ValueError (not TypeError),
        # which previously escaped this handler. Catch both.
        except (TypeError, ValueError):
            observation_time = 0

        return dict(
            city=_find('city', response['observation_location']),
            condition=_find('weather'),
            observation_time=datetime.fromtimestamp(observation_time),
            current_temp=_find('temp_' + temp_unit),
            low_temp=low_temp,
            high_temp=high_temp,
            temp_unit='°' + temp_unit.upper(),
            feelslike=_find('feelslike_' + temp_unit),
            dewpoint=_find('dewpoint_' + temp_unit),
            wind_speed=_find('wind_' + speed_unit),
            wind_unit=speed_unit,
            wind_direction=_find('wind_dir'),
            wind_gust=_find('wind_gust_' + speed_unit),
            pressure=_find('pressure_' + pressure_unit),
            pressure_unit=pressure_unit,
            pressure_trend=_find('pressure_trend'),
            visibility=_find('visibility_' + distance_unit),
            visibility_unit=distance_unit,
            humidity=_find('relative_humidity').rstrip('%'),
            uv_index=_find('uv'),
        )
| mit |
akrause2014/dispel4py | dispel4py/examples/graph_testing/partition_parallel_pipeline.py | 4 | 3362 | # Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This graph is a modification of the :py:mod:`~test.graph_testing.parallel_pipeline` example,
showing how the user can specify how the graph is going to be partitioned into MPI processes.
In this example we are specifying that one MPI process is executing the pipeline of nodes ``prod``,
``cons1`` and ``cons2`` and the other MPI processes are executing the remaining node ``cons3``::
graph.partitions = [ [prod, cons1, cons2], [cons3] ]
It can be executed with MPI and Storm. Storm will ignore the partition information.
Execution:
* MPI: Please, locate yourself into the dispel4py directory.
Execute the MPI mapping as follows::
mpiexec -n <number mpi_processes> python -m dispel4py.worker_mpi [-a name_dispel4py_graph] [-f file containing the input dataset in JSON format]
[-i number of iterations/runs'] [-s]
The argument '-s' forces the graph to run in simple processing mode, which means that the first node of the graph will be executed in one process, and the rest of the nodes will be executed in a second process.
When <-i number of iterations/runs> is not indicated, the graph is executed once by default.
For example::
mpiexec -n 3 python -m dispel4py.worker_mpi dispel4py.examples.graph_testing.partition_parallel_pipeline -i 10
Output::
Partitions: [TestProducer0, TestOneInOneOut1, TestOneInOneOut2], [TestOneInOneOut3]
Processes: {'GraphWrapperPE5': [0, 1], 'GraphWrapperPE4': [2]}
GraphWrapperPE5 (rank 0): I'm a bolt
GraphWrapperPE5 (rank 1): I'm a bolt
GraphWrapperPE4 (rank 2): I'm a spout
Rank 2: Sending terminate message to [0, 1]
GraphWrapperPE4 (rank 2): Processed 10 input block(s)
GraphWrapperPE4 (rank 2): Completed.
GraphWrapperPE5 (rank 1): Processed 5 input block(s)
GraphWrapperPE5 (rank 1): Completed.
GraphWrapperPE5 (rank 0): Processed 5 input block(s)
GraphWrapperPE5 (rank 0): Completed.
* STORM:
'''
from dispel4py.examples.graph_testing import testing_PEs as t
from dispel4py.workflow_graph import WorkflowGraph
def testParallelPipeline():
    '''
    Creates the parallel pipeline graph with partitioning information.

    ``prod`` feeds ``cons1``, whose single output fans out to both ``cons2``
    and ``cons3``.

    :rtype: the created graph
    '''
    graph = WorkflowGraph()
    prod = t.TestProducer()
    cons1 = t.TestOneInOneOut()
    cons2 = t.TestOneInOneOut()
    cons3 = t.TestOneInOneOut()
    # (the original also bound an unused local ``prev = prod``; removed)
    graph.connect(prod, 'output', cons1, 'input')
    graph.connect(cons1, 'output', cons2, 'input')
    graph.connect(cons1, 'output', cons3, 'input')
    # Partition hint for the MPI mapping: one process group runs the
    # prod -> cons1 -> cons2 pipeline, another runs cons3. The Storm
    # mapping ignores this attribute.
    graph.partitions = [[prod, cons1, cons2], [cons3]]
    return graph
''' important: this is the graph_variable '''
# The dispel4py workers look up this module-level ``graph`` variable to
# obtain the workflow to execute.
graph = testParallelPipeline()
| apache-2.0 |
kaixinjxq/crosswalk-test-suite | apptools/apptools-windows-tests/apptools/check_host.py | 15 | 2041 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Tests for the ``crosswalk-app check`` host-environment command."""

    def test_check_host_without_platforms(self):
        """``crosswalk-app check`` exits 0 and reports no errors for the
        windows target."""
        comm.setUp()
        os.chdir(comm.XwalkPath)
        cmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app check"
        (return_code, output) = comm.getstatusoutput(cmd)
        self.assertEquals(return_code, 0)
        # Only inspect the output after the 'target windows' marker; anything
        # before it belongs to other targets.
        self.assertNotIn("ERROR:", output[0].split('target windows')[1])


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
mbruggmann/luigi | test/clone_test.py | 12 | 1870 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
import luigi.notifications
luigi.notifications.DEBUG = True
class LinearSum(luigi.Task):
    """Task computing ``f(lo) + f(lo+1) + ... + f(hi-1)`` via cloned deps.

    Each instance requires a clone of itself with ``hi`` decremented by one,
    so building ``LinearSum(lo, hi)`` recursively builds the whole chain down
    to the base case ``hi == lo``. The running sum is kept in-memory on the
    instance attribute ``s`` (no targets are written).
    """
    lo = luigi.IntParameter()
    hi = luigi.IntParameter()

    def requires(self):
        # Depend on the same task with hi reduced by one until hi == lo.
        if self.hi > self.lo:
            return self.clone(hi=self.hi - 1)

    def run(self):
        if self.hi > self.lo:
            # Add this task's term on top of the dependency's partial sum.
            self.s = self.requires().s + self.f(self.hi - 1)
        else:
            self.s = 0
        self.complete = lambda: True  # workaround since we don't write any output

    def complete(self):
        # Always report incomplete until run() replaces this method on the
        # instance (see the lambda assignment above), forcing execution.
        return False

    def f(self, x):
        # Term of the series; subclasses override to change what is summed.
        return x
class PowerSum(LinearSum):
    """LinearSum variant that sums ``x ** p`` instead of ``x``; used to check
    that clone() preserves subclass parameters."""
    p = luigi.IntParameter()

    def f(self, x):
        return x ** self.p
class CloneTest(unittest.TestCase):
    """Exercises Task.clone(): parameter propagation, recursive requires()
    chains, and inheritance."""

    def test_args(self):
        # Parameters are exposed both positionally and by keyword.
        t = LinearSum(lo=42, hi=45)
        self.assertEqual(t.param_args, (42, 45))
        self.assertEqual(t.param_kwargs, {'lo': 42, 'hi': 45})

    def test_recursion(self):
        # Sum of 42..44 accumulated through the cloned dependency chain.
        t = LinearSum(lo=42, hi=45)
        luigi.build([t], local_scheduler=True)
        self.assertEqual(t.s, 42 + 43 + 44)

    def test_inheritance(self):
        # clone() on a subclass must carry along the extra parameter ``p``.
        t = PowerSum(lo=42, hi=45, p=2)
        luigi.build([t], local_scheduler=True)
        self.assertEqual(t.s, 42 ** 2 + 43 ** 2 + 44 ** 2)
| apache-2.0 |
RNAer/qiita | qiita_pet/test/test_auth_handlers.py | 2 | 2102 | from unittest import main
from qiita_pet.test.tornado_test_base import TestHandlerBase
class TestAuthCreateHandler(TestHandlerBase):
    """Tests for the account-creation page (/auth/create/)."""
    # Flag read by TestHandlerBase: these tests touch the database.
    database = True

    def test_get(self):
        response = self.get('/auth/create/')
        self.assertEqual(response.code, 200)

    def test_post(self):
        post_args = {
            'email': 'newuser@foo.bar',
            'newpass': 'password'
        }
        response = self.post('/auth/create/', post_args)
        # Make sure page response loaded successfully
        self.assertEqual(response.code, 200)
class TestAuthVerifyHandler(TestHandlerBase):
    """Tests for the email-verification page (/auth/verify/<code>)."""

    def test_get(self):
        # A dummy verification code with an (url-encoded) email query arg
        # should still render the page with HTTP 200.
        response = self.get('/auth/verify/SOMETHINGHERE?email=test%40foo.bar')
        self.assertEqual(response.code, 200)
class TestAuthLoginHandler(TestHandlerBase):
    """Tests for the login page and login form submission (/auth/login/)."""

    def test_get(self):
        response = self.get('/auth/login/')
        self.assertEqual(response.code, 200)
        # make sure redirect happened properly
        port = self.get_http_port()
        self.assertEqual(response.effective_url, 'http://localhost:%d/' % port)

    def test_post_correct_pass(self):
        # Valid credentials: the request completes with HTTP 200.
        post_args = {
            'username': 'test@foo.bar',
            'passwd': 'password',
            'next': '/'
        }
        response = self.post('/auth/login/', post_args)
        self.assertEqual(response.code, 200)

    def test_post_wrong_pass(self):
        # Invalid password: the login page is re-rendered, still HTTP 200.
        post_args = {
            'username': 'test@foo.bar',
            'passwd': 'wrongpass',
            'next': '/'
        }
        response = self.post('/auth/login/', post_args)
        self.assertEqual(response.code, 200)

    def test_set_current_user(self):
        # TODO: add proper test for this once figure out how. Issue 567
        pass
class TestAuthLogoutHandler(TestHandlerBase):
    """Tests for logout handling."""

    def test_get(self):
        # NOTE(review): this requests /auth/login/, not /auth/logout/ —
        # appears copy-pasted from TestAuthLoginHandler.test_get; confirm
        # whether the logout route was meant to be exercised here.
        response = self.get('/auth/login/')
        self.assertEqual(response.code, 200)
        # make sure redirect happened properly
        port = self.get_http_port()
        self.assertEqual(response.effective_url, 'http://localhost:%d/' % port)


if __name__ == "__main__":
    main()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.