repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
chirilo/kuma | vendor/packages/pygments/lexers/automation.py | 72 | 19630 | # -*- coding: utf-8 -*-
"""
pygments.lexers.automation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for automation scripting languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, combined
from pygments.token import Text, Comment, Operator, Name, String, \
Number, Punctuation, Generic
__all__ = ['AutohotkeyLexer', 'AutoItLexer']
class AutohotkeyLexer(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
.. versionadded:: 1.4
"""
name = 'autohotkey'
aliases = ['ahk', 'autohotkey']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
tokens = {
'root': [
(r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
(r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
(r'\s+;.*?$', Comment.Singleline),
(r'^;.*?$', Comment.Singleline),
(r'[]{}(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInVariables'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
include('garbage'),
],
'incomment': [
(r'^\s*\*/', Comment.Multiline, '#pop'),
(r'[^*/]', Comment.Multiline),
(r'[*/]', Comment.Multiline)
],
'incontinuation': [
(r'^\s*\)', Generic, '#pop'),
(r'[^)]', Generic),
(r'[)]', Generic),
],
'commands': [
(r'(?i)^(\s*)(global|local|static|'
r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
r'SetBatchLines|SetCapslockState|SetControlDelay|'
r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
r'WinWait)\b', bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
Name.Function),
],
'builtInVariables': [
(r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
Name.Variable),
],
'labels': [
# hotkeys and labels
# technically, hotkey names are limited to named keys and buttons
(r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
(r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
# (r'.', Text), # no cheating
],
}
class AutoItLexer(RegexLexer):
"""
For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files.
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting
.. versionadded:: 1.6
"""
name = 'AutoIt'
aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
# Keywords, functions, macros from au3.keywords.properties
# which can be found in AutoIt installed directory, e.g.
# c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while exit""".split()
functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()
tokens = {
'root': [
(r';.*\n', Comment.Single),
(r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline),
(r'[\[\]{}(),;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
(r'[$|@][a-zA-Z_]\w*', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInMarcros'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
(r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(%s)\b' % '|'.join(functions),
Name.Function),
],
'builtInMarcros': [
(r'(?i)(%s)\b' % '|'.join(macros),
Name.Variable.Global),
],
'labels': [
# sendkeys
(r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
],
}
| mpl-2.0 |
jgeskens/django | tests/syndication/feeds.py | 58 | 4391 | from __future__ import absolute_import, unicode_literals
from django.contrib.syndication import views
from django.core.exceptions import ObjectDoesNotExist
from django.utils import feedgenerator, tzinfo
from .models import Article, Entry
class ComplexFeed(views.Feed):
def get_object(self, request, foo=None):
if foo is not None:
raise ObjectDoesNotExist
return None
class TestRss2Feed(views.Feed):
title = 'My blog'
description = 'A more thorough description of my blog.'
link = '/blog/'
feed_guid = '/foo/bar/1234'
author_name = 'Sally Smith'
author_email = 'test@example.com'
author_link = 'http://www.example.com/'
categories = ('python', 'django')
feed_copyright = 'Copyright (c) 2007, Sally Smith'
ttl = 600
def items(self):
return Entry.objects.all()
def item_description(self, item):
return "Overridden description: %s" % item
def item_pubdate(self, item):
return item.date
item_author_name = 'Sally Smith'
item_author_email = 'test@example.com'
item_author_link = 'http://www.example.com/'
item_categories = ('python', 'testing')
item_copyright = 'Copyright (c) 2007, Sally Smith'
class TestRss2FeedWithGuidIsPermaLinkTrue(TestRss2Feed):
def item_guid_is_permalink(self, item):
return True
class TestRss2FeedWithGuidIsPermaLinkFalse(TestRss2Feed):
def item_guid(self, item):
return str(item.pk)
def item_guid_is_permalink(self, item):
return False
class TestRss091Feed(TestRss2Feed):
feed_type = feedgenerator.RssUserland091Feed
class TestNoPubdateFeed(views.Feed):
title = 'Test feed'
link = '/feed/'
def items(self):
return Entry.objects.all()
class TestAtomFeed(TestRss2Feed):
feed_type = feedgenerator.Atom1Feed
subtitle = TestRss2Feed.description
class ArticlesFeed(TestRss2Feed):
"""
A feed to test no link being defined. Articles have no get_absolute_url()
method, and item_link() is not defined.
"""
def items(self):
return Article.objects.all()
class TestEnclosureFeed(TestRss2Feed):
pass
class TemplateFeed(TestRss2Feed):
"""
A feed to test defining item titles and descriptions with templates.
"""
title_template = 'syndication/title.html'
description_template = 'syndication/description.html'
# Defining a template overrides any item_title definition
def item_title(self):
return "Not in a template"
class TemplateContextFeed(TestRss2Feed):
"""
A feed to test custom context data in templates for title or description.
"""
title_template = 'syndication/title_context.html'
description_template = 'syndication/description_context.html'
def get_context_data(self, **kwargs):
context = super(TemplateContextFeed, self).get_context_data(**kwargs)
context['foo'] = 'bar'
return context
class NaiveDatesFeed(TestAtomFeed):
"""
A feed with naive (non-timezone-aware) dates.
"""
def item_pubdate(self, item):
return item.date
class TZAwareDatesFeed(TestAtomFeed):
"""
A feed with timezone-aware dates.
"""
def item_pubdate(self, item):
# Provide a weird offset so that the test can know it's getting this
# specific offset and not accidentally getting on from
# settings.TIME_ZONE.
return item.date.replace(tzinfo=tzinfo.FixedOffset(42))
class TestFeedUrlFeed(TestAtomFeed):
feed_url = 'http://example.com/customfeedurl/'
class MyCustomAtom1Feed(feedgenerator.Atom1Feed):
"""
Test of a custom feed generator class.
"""
def root_attributes(self):
attrs = super(MyCustomAtom1Feed, self).root_attributes()
attrs['django'] = 'rocks'
return attrs
def add_root_elements(self, handler):
super(MyCustomAtom1Feed, self).add_root_elements(handler)
handler.addQuickElement('spam', 'eggs')
def item_attributes(self, item):
attrs = super(MyCustomAtom1Feed, self).item_attributes(item)
attrs['bacon'] = 'yum'
return attrs
def add_item_elements(self, handler, item):
super(MyCustomAtom1Feed, self).add_item_elements(handler, item)
handler.addQuickElement('ministry', 'silly walks')
class TestCustomFeed(TestAtomFeed):
feed_type = MyCustomAtom1Feed
| bsd-3-clause |
nthall/pip | tests/scripts/test_all_pip.py | 48 | 3783 | import os
import re
import sys
import subprocess
from os.path import dirname, abspath
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import rmtree
src_folder = dirname(dirname(abspath(__file__)))
if sys.platform == 'win32':
bin_dir = 'Scripts'
else:
bin_dir = 'bin'
def all_projects():
data = urllib_request.urlopen('http://pypi.python.org/simple/').read()
projects = [m.group(1) for m in re.finditer(r'<a.*?>(.+)</a>', data)]
return projects
def main(args=None):
if args is None:
args = sys.argv[1:]
if not args:
print('Usage: test_all_pip.py <output-dir>')
sys.exit(1)
output = os.path.abspath(args[0])
if not os.path.exists(output):
print('Creating %s' % output)
os.makedirs(output)
pending_fn = os.path.join(output, 'pending.txt')
if not os.path.exists(pending_fn):
print('Downloading pending list')
projects = all_projects()
print('Found %s projects' % len(projects))
f = open(pending_fn, 'w')
for name in projects:
f.write(name + '\n')
f.close()
print('Starting testing...')
while os.stat(pending_fn).st_size:
_test_packages(output, pending_fn)
print('Finished all pending!')
def _test_packages(output, pending_fn):
package = get_last_item(pending_fn)
print('Testing package %s' % package)
dest_dir = os.path.join(output, package)
print('Creating virtualenv in %s' % dest_dir)
create_venv(dest_dir)
print('Uninstalling actual pip')
code = subprocess.check_call([
os.path.join(dest_dir, bin_dir, 'pip'),
'uninstall',
'-y',
'pip',
])
assert not code, 'pip uninstallation failed'
print('Installing development pip')
code = subprocess.check_call(
[
os.path.join(dest_dir, bin_dir, 'python'),
'setup.py',
'install'
],
cwd=src_folder,
)
assert not code, 'pip installation failed'
print('Trying installation of %s' % dest_dir)
code = subprocess.check_call([
os.path.join(dest_dir, bin_dir, 'pip'),
'install',
package,
])
if code:
print('Installation of %s failed' % package)
print('Now checking easy_install...')
create_venv(dest_dir)
code = subprocess.check_call([
os.path.join(dest_dir, bin_dir, 'easy_install'),
package,
])
if code:
print('easy_install also failed')
add_package(os.path.join(output, 'easy-failure.txt'), package)
else:
print('easy_install succeeded')
add_package(os.path.join(output, 'failure.txt'), package)
pop_last_item(pending_fn, package)
else:
print('Installation of %s succeeded' % package)
add_package(os.path.join(output, 'success.txt'), package)
pop_last_item(pending_fn, package)
rmtree(dest_dir)
def create_venv(dest_dir):
if os.path.exists(dest_dir):
rmtree(dest_dir)
print('Creating virtualenv in %s' % dest_dir)
code = subprocess.check_call([
'virtualenv',
'--no-site-packages',
dest_dir,
])
assert not code, "virtualenv failed"
def get_last_item(fn):
f = open(fn, 'r')
lines = f.readlines()
f.close()
return lines[-1].strip()
def pop_last_item(fn, line=None):
f = open(fn, 'r')
lines = f.readlines()
f.close()
if line:
assert lines[-1].strip() == line.strip()
lines.pop()
f = open(fn, 'w')
f.writelines(lines)
f.close()
def add_package(filename, package):
f = open(filename, 'a')
f.write(package + '\n')
f.close()
if __name__ == '__main__':
main()
| mit |
alviano/wasp | tests/asp/cautious/3col.example11.cautious.asp.test.py | 1 | 12395 | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
1 56 0 0
1 57 0 0
1 58 0 0
1 59 0 0
1 60 0 0
1 61 0 0
1 62 0 0
1 63 0 0
1 64 0 0
1 65 0 0
1 66 0 0
1 67 0 0
1 68 0 0
1 69 0 0
1 70 0 0
1 71 0 0
1 72 0 0
1 73 0 0
1 74 0 0
1 75 0 0
1 76 0 0
1 77 0 0
1 78 0 0
1 79 0 0
1 80 0 0
1 81 0 0
1 82 0 0
1 83 0 0
1 84 0 0
1 85 0 0
1 86 0 0
1 87 0 0
1 88 0 0
1 89 0 0
1 90 0 0
1 91 0 0
1 92 0 0
1 93 0 0
1 94 0 0
1 95 0 0
1 96 0 0
1 97 0 0
1 98 0 0
1 99 0 0
1 100 0 0
1 101 0 0
1 102 0 0
1 103 2 1 104 105
1 104 2 1 103 105
1 105 0 0
1 106 2 1 107 108
1 107 2 1 106 108
1 108 0 0
1 109 2 1 110 111
1 110 2 1 109 111
1 111 0 0
1 112 2 1 113 114
1 113 2 1 112 114
1 114 0 0
1 115 2 1 116 117
1 116 2 1 115 117
1 117 0 0
1 118 2 1 119 120
1 119 2 1 118 120
1 120 0 0
1 121 2 1 122 123
1 122 2 1 121 123
1 123 0 0
1 124 2 1 125 126
1 125 2 1 124 126
1 126 0 0
1 127 2 1 128 129
1 128 2 1 127 129
1 129 0 0
1 130 2 1 131 132
1 131 2 1 130 132
1 132 0 0
1 133 2 1 134 135
1 134 2 1 133 135
1 135 0 0
1 136 2 1 137 138
1 137 2 1 136 138
1 138 0 0
1 139 2 1 140 141
1 140 2 1 139 141
1 141 0 0
1 142 2 1 143 144
1 143 2 1 142 144
1 144 0 0
1 145 2 1 146 147
1 146 2 1 145 147
1 147 0 0
1 148 2 1 149 150
1 149 2 1 148 150
1 150 0 0
1 151 2 1 152 153
1 152 2 1 151 153
1 153 0 0
1 154 2 1 155 156
1 155 2 1 154 156
1 156 0 0
1 157 2 1 158 159
1 158 2 1 157 159
1 159 0 0
1 160 2 1 161 162
1 161 2 1 160 162
1 162 0 0
1 163 2 1 164 165
1 164 2 1 163 165
1 165 0 0
1 166 2 1 167 168
1 167 2 1 166 168
1 168 0 0
1 169 2 1 170 171
1 170 2 1 169 171
1 171 0 0
1 172 2 1 173 174
1 173 2 1 172 174
1 174 0 0
1 175 2 1 176 177
1 176 2 1 175 177
1 177 0 0
1 178 2 1 179 180
1 179 2 1 178 180
1 180 0 0
1 181 2 1 182 183
1 182 2 1 181 183
1 183 0 0
1 184 2 1 185 186
1 185 2 1 184 186
1 186 0 0
1 187 2 1 188 189
1 188 2 1 187 189
1 189 0 0
1 190 2 1 191 192
1 191 2 1 190 192
1 192 0 0
1 193 2 1 194 195
1 194 2 1 193 195
1 195 0 0
1 196 2 1 197 198
1 197 2 1 196 198
1 198 0 0
1 199 2 1 200 201
1 200 2 1 199 201
1 201 0 0
1 202 2 1 203 204
1 203 2 1 202 204
1 204 0 0
1 205 2 1 206 207
1 206 2 1 205 207
1 207 0 0
1 208 2 1 209 210
1 209 2 1 208 210
1 210 0 0
1 211 2 1 212 213
1 212 2 1 211 213
1 213 0 0
1 214 2 1 215 216
1 215 2 1 214 216
1 216 0 0
1 217 2 1 218 219
1 218 2 1 217 219
1 219 0 0
1 220 2 1 221 222
1 221 2 1 220 222
1 222 0 0
1 223 2 1 224 225
1 224 2 1 223 225
1 225 0 0
1 226 2 1 227 228
1 227 2 1 226 228
1 228 0 0
1 229 2 1 230 231
1 230 2 1 229 231
1 231 0 0
1 232 2 1 233 234
1 233 2 1 232 234
1 234 0 0
1 235 2 1 236 237
1 236 2 1 235 237
1 237 0 0
1 238 2 1 239 240
1 239 2 1 238 240
1 240 0 0
1 241 2 1 242 243
1 242 2 1 241 243
1 243 0 0
1 244 2 1 245 246
1 245 2 1 244 246
1 246 0 0
1 247 2 1 248 249
1 248 2 1 247 249
1 249 0 0
1 250 2 1 251 252
1 251 2 1 250 252
1 252 0 0
1 253 2 1 254 255
1 254 2 1 253 255
1 255 0 0
1 256 2 1 257 258
1 257 2 1 256 258
1 258 0 0
1 259 2 1 260 261
1 260 2 1 259 261
1 261 0 0
1 262 2 1 263 264
1 263 2 1 262 264
1 264 0 0
1 265 2 1 266 267
1 266 2 1 265 267
1 267 0 0
1 268 2 1 269 270
1 269 2 1 268 270
1 270 0 0
1 271 2 1 272 273
1 272 2 1 271 273
1 273 0 0
1 274 2 1 275 276
1 275 2 1 274 276
1 276 0 0
1 277 2 1 278 279
1 278 2 1 277 279
1 279 0 0
1 280 2 1 281 282
1 281 2 1 280 282
1 282 0 0
2 283 3 0 2 223 163 103
1 1 1 0 283
2 284 3 0 2 226 166 106
1 1 1 0 284
2 285 3 0 2 229 169 109
1 1 1 0 285
2 286 3 0 2 232 172 112
1 1 1 0 286
2 287 3 0 2 235 175 115
1 1 1 0 287
2 288 3 0 2 238 178 118
1 1 1 0 288
2 289 3 0 2 241 181 121
1 1 1 0 289
2 290 3 0 2 244 184 124
1 1 1 0 290
2 291 3 0 2 247 187 127
1 1 1 0 291
2 292 3 0 2 250 190 130
1 1 1 0 292
2 293 3 0 2 253 193 133
1 1 1 0 293
2 294 3 0 2 256 196 136
1 1 1 0 294
2 295 3 0 2 259 199 139
1 1 1 0 295
2 296 3 0 2 262 202 142
1 1 1 0 296
2 297 3 0 2 265 205 145
1 1 1 0 297
2 298 3 0 2 268 208 148
1 1 1 0 298
2 299 3 0 2 271 211 151
1 1 1 0 299
2 300 3 0 2 274 214 154
1 1 1 0 300
2 301 3 0 2 277 217 157
1 1 1 0 301
2 302 3 0 2 280 220 160
1 1 1 0 302
2 303 3 0 1 223 163 103
1 1 1 1 303
2 304 3 0 1 226 166 106
1 1 1 1 304
2 305 3 0 1 229 169 109
1 1 1 1 305
2 306 3 0 1 232 172 112
1 1 1 1 306
2 307 3 0 1 235 175 115
1 1 1 1 307
2 308 3 0 1 238 178 118
1 1 1 1 308
2 309 3 0 1 241 181 121
1 1 1 1 309
2 310 3 0 1 244 184 124
1 1 1 1 310
2 311 3 0 1 247 187 127
1 1 1 1 311
2 312 3 0 1 250 190 130
1 1 1 1 312
2 313 3 0 1 253 193 133
1 1 1 1 313
2 314 3 0 1 256 196 136
1 1 1 1 314
2 315 3 0 1 259 199 139
1 1 1 1 315
2 316 3 0 1 262 202 142
1 1 1 1 316
2 317 3 0 1 265 205 145
1 1 1 1 317
2 318 3 0 1 268 208 148
1 1 1 1 318
2 319 3 0 1 271 211 151
1 1 1 1 319
2 320 3 0 1 274 214 154
1 1 1 1 320
2 321 3 0 1 277 217 157
1 1 1 1 321
2 322 3 0 1 280 220 160
1 1 1 1 322
1 1 2 0 280 274
1 1 2 0 280 268
1 1 2 0 280 265
1 1 2 0 280 238
1 1 2 0 277 274
1 1 2 0 277 271
1 1 2 0 277 262
1 1 2 0 277 232
1 1 2 0 274 268
1 1 2 0 274 265
1 1 2 0 274 250
1 1 2 0 271 247
1 1 2 0 271 244
1 1 2 0 271 238
1 1 2 0 271 232
1 1 2 0 268 262
1 1 2 0 265 262
1 1 2 0 262 238
1 1 2 0 262 229
1 1 2 0 259 256
1 1 2 0 259 244
1 1 2 0 259 226
1 1 2 0 256 238
1 1 2 0 256 235
1 1 2 0 256 226
1 1 2 0 253 250
1 1 2 0 253 238
1 1 2 0 253 232
1 1 2 0 250 244
1 1 2 0 250 232
1 1 2 0 247 238
1 1 2 0 244 229
1 1 2 0 241 238
1 1 2 0 241 229
1 1 2 0 238 226
1 1 2 0 235 232
1 1 2 0 235 229
1 1 2 0 229 223
1 1 2 0 220 214
1 1 2 0 220 208
1 1 2 0 220 205
1 1 2 0 220 178
1 1 2 0 217 214
1 1 2 0 217 211
1 1 2 0 217 202
1 1 2 0 217 172
1 1 2 0 214 208
1 1 2 0 214 205
1 1 2 0 214 190
1 1 2 0 211 187
1 1 2 0 211 184
1 1 2 0 211 178
1 1 2 0 211 172
1 1 2 0 208 202
1 1 2 0 205 202
1 1 2 0 202 178
1 1 2 0 202 169
1 1 2 0 199 196
1 1 2 0 199 184
1 1 2 0 199 166
1 1 2 0 196 178
1 1 2 0 196 175
1 1 2 0 196 166
1 1 2 0 193 190
1 1 2 0 193 178
1 1 2 0 193 172
1 1 2 0 190 184
1 1 2 0 190 172
1 1 2 0 187 178
1 1 2 0 184 169
1 1 2 0 181 178
1 1 2 0 181 169
1 1 2 0 178 166
1 1 2 0 175 172
1 1 2 0 175 169
1 1 2 0 169 163
1 1 2 0 160 154
1 1 2 0 160 148
1 1 2 0 160 145
1 1 2 0 160 118
1 1 2 0 157 154
1 1 2 0 157 151
1 1 2 0 157 142
1 1 2 0 157 112
1 1 2 0 154 148
1 1 2 0 154 145
1 1 2 0 154 130
1 1 2 0 151 127
1 1 2 0 151 124
1 1 2 0 151 118
1 1 2 0 151 112
1 1 2 0 148 142
1 1 2 0 145 142
1 1 2 0 142 118
1 1 2 0 142 109
1 1 2 0 139 136
1 1 2 0 139 124
1 1 2 0 139 106
1 1 2 0 136 118
1 1 2 0 136 115
1 1 2 0 136 106
1 1 2 0 133 130
1 1 2 0 133 118
1 1 2 0 133 112
1 1 2 0 130 124
1 1 2 0 130 112
1 1 2 0 127 118
1 1 2 0 124 109
1 1 2 0 121 118
1 1 2 0 121 109
1 1 2 0 118 106
1 1 2 0 115 112
1 1 2 0 115 109
1 1 2 0 109 103
0
22 link(1,5)
23 link(5,1)
24 link(1,3)
25 link(3,1)
26 link(2,17)
27 link(17,2)
28 link(2,4)
29 link(4,2)
30 link(3,2)
31 link(2,3)
32 link(3,6)
33 link(6,3)
34 link(4,15)
35 link(15,4)
36 link(4,13)
37 link(13,4)
38 link(5,3)
39 link(3,5)
40 link(5,7)
41 link(7,5)
42 link(6,1)
43 link(1,6)
44 link(6,7)
45 link(7,6)
46 link(7,2)
47 link(2,7)
48 link(7,15)
49 link(15,7)
50 link(8,19)
51 link(19,8)
52 link(8,9)
53 link(9,8)
54 link(9,16)
55 link(16,9)
56 link(9,19)
57 link(19,9)
58 link(10,11)
59 link(11,10)
60 link(10,15)
61 link(15,10)
62 link(11,17)
63 link(17,11)
64 link(11,3)
65 link(3,11)
66 link(12,4)
67 link(4,12)
68 link(12,15)
69 link(15,12)
70 link(13,11)
71 link(11,13)
72 link(13,8)
73 link(8,13)
74 link(14,18)
75 link(18,14)
76 link(14,15)
77 link(15,14)
78 link(15,9)
79 link(9,15)
80 link(15,1)
81 link(1,15)
82 link(16,18)
83 link(18,16)
84 link(16,17)
85 link(17,16)
86 link(17,4)
87 link(4,17)
88 link(17,10)
89 link(10,17)
90 link(18,13)
91 link(13,18)
92 link(18,7)
93 link(7,18)
94 link(19,15)
95 link(15,19)
96 link(20,21)
97 link(21,20)
98 link(20,18)
99 link(18,20)
100 colour(r)
101 colour(b)
102 colour(g)
103 chosenColour(20,g)
106 chosenColour(19,g)
109 chosenColour(18,g)
112 chosenColour(17,g)
115 chosenColour(16,g)
118 chosenColour(15,g)
121 chosenColour(14,g)
124 chosenColour(13,g)
127 chosenColour(12,g)
130 chosenColour(11,g)
133 chosenColour(10,g)
136 chosenColour(9,g)
139 chosenColour(8,g)
142 chosenColour(7,g)
145 chosenColour(6,g)
148 chosenColour(5,g)
151 chosenColour(4,g)
154 chosenColour(3,g)
157 chosenColour(2,g)
160 chosenColour(1,g)
163 chosenColour(20,b)
166 chosenColour(19,b)
169 chosenColour(18,b)
172 chosenColour(17,b)
175 chosenColour(16,b)
178 chosenColour(15,b)
181 chosenColour(14,b)
184 chosenColour(13,b)
187 chosenColour(12,b)
190 chosenColour(11,b)
193 chosenColour(10,b)
196 chosenColour(9,b)
199 chosenColour(8,b)
202 chosenColour(7,b)
205 chosenColour(6,b)
208 chosenColour(5,b)
211 chosenColour(4,b)
214 chosenColour(3,b)
217 chosenColour(2,b)
220 chosenColour(1,b)
223 chosenColour(20,r)
226 chosenColour(19,r)
229 chosenColour(18,r)
232 chosenColour(17,r)
235 chosenColour(16,r)
238 chosenColour(15,r)
241 chosenColour(14,r)
244 chosenColour(13,r)
247 chosenColour(12,r)
250 chosenColour(11,r)
253 chosenColour(10,r)
256 chosenColour(9,r)
259 chosenColour(8,r)
262 chosenColour(7,r)
265 chosenColour(6,r)
268 chosenColour(5,r)
271 chosenColour(4,r)
274 chosenColour(3,r)
277 chosenColour(2,r)
280 chosenColour(1,r)
2 node(1)
3 node(2)
4 node(3)
5 node(4)
6 node(5)
7 node(6)
8 node(7)
9 node(8)
10 node(9)
11 node(10)
12 node(11)
13 node(12)
14 node(13)
15 node(14)
16 node(15)
17 node(16)
18 node(17)
19 node(18)
20 node(19)
21 node(20)
104 notChosenColour(20,g)
107 notChosenColour(19,g)
110 notChosenColour(18,g)
113 notChosenColour(17,g)
116 notChosenColour(16,g)
119 notChosenColour(15,g)
122 notChosenColour(14,g)
125 notChosenColour(13,g)
128 notChosenColour(12,g)
131 notChosenColour(11,g)
134 notChosenColour(10,g)
137 notChosenColour(9,g)
140 notChosenColour(8,g)
143 notChosenColour(7,g)
146 notChosenColour(6,g)
149 notChosenColour(5,g)
152 notChosenColour(4,g)
155 notChosenColour(3,g)
158 notChosenColour(2,g)
161 notChosenColour(1,g)
164 notChosenColour(20,b)
167 notChosenColour(19,b)
170 notChosenColour(18,b)
173 notChosenColour(17,b)
176 notChosenColour(16,b)
179 notChosenColour(15,b)
182 notChosenColour(14,b)
185 notChosenColour(13,b)
188 notChosenColour(12,b)
191 notChosenColour(11,b)
194 notChosenColour(10,b)
197 notChosenColour(9,b)
200 notChosenColour(8,b)
203 notChosenColour(7,b)
206 notChosenColour(6,b)
209 notChosenColour(5,b)
212 notChosenColour(4,b)
215 notChosenColour(3,b)
218 notChosenColour(2,b)
221 notChosenColour(1,b)
224 notChosenColour(20,r)
227 notChosenColour(19,r)
230 notChosenColour(18,r)
233 notChosenColour(17,r)
236 notChosenColour(16,r)
239 notChosenColour(15,r)
242 notChosenColour(14,r)
245 notChosenColour(13,r)
248 notChosenColour(12,r)
251 notChosenColour(11,r)
254 notChosenColour(10,r)
257 notChosenColour(9,r)
260 notChosenColour(8,r)
263 notChosenColour(7,r)
266 notChosenColour(6,r)
269 notChosenColour(5,r)
272 notChosenColour(4,r)
275 notChosenColour(3,r)
278 notChosenColour(2,r)
281 notChosenColour(1,r)
0
B+
0
B-
1
0
1
"""
output = """
{node(1), node(2), node(3), node(4), node(5), node(6), node(7), node(8), node(9), node(10), node(11), node(12), node(13), node(14), node(15), node(16), node(17), node(18), node(19), node(20), link(1,5), link(5,1), link(1,3), link(3,1), link(2,17), link(17,2), link(2,4), link(4,2), link(3,2), link(2,3), link(3,6), link(6,3), link(4,15), link(15,4), link(4,13), link(13,4), link(5,3), link(3,5), link(5,7), link(7,5), link(6,1), link(1,6), link(6,7), link(7,6), link(7,2), link(2,7), link(7,15), link(15,7), link(8,19), link(19,8), link(8,9), link(9,8), link(9,16), link(16,9), link(9,19), link(19,9), link(10,11), link(11,10), link(10,15), link(15,10), link(11,17), link(17,11), link(11,3), link(3,11), link(12,4), link(4,12), link(12,15), link(15,12), link(13,11), link(11,13), link(13,8), link(8,13), link(14,18), link(18,14), link(14,15), link(15,14), link(15,9), link(9,15), link(15,1), link(1,15), link(16,18), link(18,16), link(16,17), link(17,16), link(17,4), link(4,17), link(17,10), link(10,17), link(18,13), link(13,18), link(18,7), link(7,18), link(19,15), link(15,19), link(20,21), link(21,20), link(20,18), link(18,20), colour(r), colour(b), colour(g)}
"""
| apache-2.0 |
basnijholt/holoviews | holoviews/tests/core/testcollation.py | 2 | 3718 | """
Test cases for Collator
"""
import itertools
import numpy as np
from holoviews.core import Collator, HoloMap, NdOverlay, Overlay, GridSpace
from holoviews.element import Curve
from holoviews.element.comparison import ComparisonTestCase
class TestCollation(ComparisonTestCase):
    """Exercise Collator and the collate() helpers on nested containers.

    setUp builds a HoloMap keyed on three dimensions (alpha, beta, delta)
    plus several equivalent nested/overlaid/gridded views of the same data;
    each test checks that collating a nested form reproduces the flat one.
    """

    def setUp(self):
        alphas, betas, deltas = 2, 2, 2
        Bs = list(range(100))
        coords = itertools.product(*(range(n) for n in [alphas, betas, deltas]))
        mus=np.random.rand(alphas, betas, 100, 10)
        # NOTE(review): `coords` is a one-shot iterator, so it is exhausted
        # during the first pass of the outer `for i` loop; only i == 0
        # contributes entries (later i values would overwrite the same keys
        # anyway) -- confirm this is intentional.
        self.phase_boundaries = {(a, b, d): Curve(zip(Bs, mus[a, b, :, i]*a+b))
                                 for i in range(10) for a, b, d in coords}
        self.dimensions = ['alpha', 'beta', 'delta']
        # The flat HoloMap over all three key dimensions ...
        self.nesting_hmap = HoloMap(self.phase_boundaries, kdims=self.dimensions)
        # ... and the same data restructured in several nested layouts.
        self.nested_hmap = self.nesting_hmap.groupby(['alpha'])
        self.nested_overlay = self.nesting_hmap.overlay(['delta'])
        self.nested_grid = self.nested_overlay.grid(['alpha', 'beta'])
        self.nested_layout = self.nested_overlay.layout(['alpha', 'beta'])

    def test_collate_hmap(self):
        # Collating a HoloMap-of-HoloMaps should restore the flat HoloMap.
        collated = self.nested_hmap.collate()
        self.assertEqual(collated.kdims, self.nesting_hmap.kdims)
        self.assertEqual(collated.keys(), self.nesting_hmap.keys())
        self.assertEqual(collated.type, self.nesting_hmap.type)
        self.assertEqual(repr(collated), repr(self.nesting_hmap))

    def test_collate_ndoverlay(self):
        # Collating the overlaid form into an NdOverlay should match an
        # NdOverlay built directly from the raw data.
        collated = self.nested_overlay.collate(NdOverlay)
        ndoverlay = NdOverlay(self.phase_boundaries, kdims=self.dimensions)
        self.assertEqual(collated.kdims, ndoverlay.kdims)
        self.assertEqual(collated.keys(), ndoverlay.keys())
        self.assertEqual(repr(collated), repr(ndoverlay))

    def test_collate_gridspace_ndoverlay(self):
        grid = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).grid(['alpha', 'beta'])
        self.assertEqual(grid.dimensions(), self.nested_grid.dimensions())
        self.assertEqual(grid.keys(), self.nested_grid.keys())
        self.assertEqual(repr(grid), repr(self.nested_grid))

    def test_collate_ndlayout_ndoverlay(self):
        layout = self.nesting_hmap.groupby(['delta']).collate(NdOverlay).layout(['alpha', 'beta'])
        self.assertEqual(layout.dimensions(), self.nested_layout.dimensions())
        self.assertEqual(layout.keys(), self.nested_layout.keys())
        self.assertEqual(repr(layout), repr(self.nested_layout))

    def test_collate_layout_overlay(self):
        # A Collator populated with (overlay + overlay) pairs should expose
        # the same dimensions as the equivalent Layout.
        layout = self.nested_overlay + self.nested_overlay
        collated = Collator(kdims=['alpha', 'beta'])
        for k, v in self.nested_overlay.items():
            collated[k] = v + v
        collated = collated()
        self.assertEqual(collated.dimensions(), layout.dimensions())

    def test_collate_layout_hmap(self):
        layout = self.nested_overlay + self.nested_overlay
        collated = Collator(kdims=['delta'], merge_type=NdOverlay)
        for k, v in self.nesting_hmap.groupby(['delta']).items():
            collated[k] = v + v
        collated = collated()
        self.assertEqual(repr(collated), repr(layout))
        self.assertEqual(collated.dimensions(), layout.dimensions())

    def test_overlay_hmap_collate(self):
        # Overlay of identical HoloMaps collates to hmap*hmap*hmap.
        hmap = HoloMap({i: Curve(np.arange(10)*i) for i in range(3)})
        overlaid = Overlay([hmap, hmap, hmap]).collate()
        self.assertEqual(overlaid, hmap*hmap*hmap)

    def test_overlay_gridspace_collate(self):
        # Overlay of identical GridSpaces collates to grid*grid*grid.
        grid = GridSpace({(i,j): Curve(np.arange(10)*i) for i in range(3)
                          for j in range(3)})
        overlaid = Overlay([grid, grid, grid]).collate()
        self.assertEqual(overlaid, grid*grid*grid)
| bsd-3-clause |
indictranstech/erpnext | erpnext/patches/v7_0/remove_features_setup.py | 41 | 1251 | import frappe
from erpnext.setup.install import create_compact_item_print_custom_field
from frappe.utils import cint
def execute():
    """Migrate settings off the removed 'Features Setup' doctype.

    Copies the item-barcode flag into Stock Settings and the
    compact-item-print flag into Print Settings, deletes the per-user
    feature-flag defaults, then drops the Features Setup doctype itself.
    """
    frappe.reload_doctype('Stock Settings')
    stock_settings = frappe.get_doc('Stock Settings', 'Stock Settings')
    stock_settings.show_barcode_field = cint(frappe.db.get_value("Features Setup", None, "fs_item_barcode"))
    # Clear a dangling default UOM so save() does not fail validation.
    if not frappe.db.exists("UOM", stock_settings.stock_uom):
        stock_settings.stock_uom = None
    stock_settings.save()

    create_compact_item_print_custom_field()

    compact_item_print = frappe.db.get_value("Features Setup", None, "compact_item_print")
    frappe.db.set_value("Print Settings", None, "compact_item_print", compact_item_print)

    # remove defaults
    frappe.db.sql("""delete from tabDefaultValue where defkey in ('fs_item_serial_nos',
        'fs_item_batch_nos', 'fs_brands', 'fs_item_barcode',
        'fs_item_advanced', 'fs_packing_details', 'fs_item_group_in_details',
        'fs_exports', 'fs_imports', 'fs_discounts', 'fs_purchase_discounts',
        'fs_after_sales_installations', 'fs_projects', 'fs_sales_extras',
        'fs_recurring_invoice', 'fs_pos', 'fs_manufacturing', 'fs_quality',
        'fs_page_break', 'fs_more_info', 'fs_pos_view', 'compact_item_print')""")

    frappe.delete_doc('DocType', 'Features Setup')
| agpl-3.0 |
mayankjohri/LetsExplorePython | Section 2 - Advance Python/Chapter S2.06 - Web Development/code/old/restful_webapi.py | 4 | 1198 | import web
import json
import threading
# URL routing: every request path is captured and dispatched to class API.
urls = ('/(.*)', 'API')
app = web.application(urls, globals())

# In-memory record store mapping integer id -> JSON-decoded document.
db = {}
# Id assigned to the next POSTed record; only ever incremented.
nextid = 0

# Very simple REST API application built with web.py
class API():
    """Minimal in-memory REST resource backed by the module-level ``db`` dict.

    GET /          -> the whole collection
    GET /<id>      -> one record, or 404
    POST /         -> create a record, returns {'created': id}
    PUT /<id>      -> replace an existing record, or 404
    DELETE /<id>   -> remove an existing record, or 404
    """

    @staticmethod
    def _parse_id(id):
        """Return the integer form of ``id``, or None if missing/non-numeric."""
        try:
            return int(id)
        except (TypeError, ValueError):
            return None

    def GET(self, id=None):
        global db, nextid
        # BUG FIX: the original did len(id), which raised TypeError when the
        # default id=None was used.  An empty/absent capture means "list all".
        if not id:
            return json.dumps(db)
        key = self._parse_id(id)
        # Non-numeric ids now yield a clean 404 instead of a ValueError (500).
        if key is not None and key in db:
            return json.dumps(db[key])
        return web.notfound()

    def POST(self, id=None):
        global db, nextid
        db[nextid] = json.loads(web.data())
        nextid += 1
        return json.dumps({'created': nextid - 1})

    def DELETE(self, id):
        global db, nextid
        key = self._parse_id(id)
        if key is not None and key in db:
            db.pop(key)
            return json.dumps({'deleted': key})
        return web.notfound()

    def PUT(self, id):
        global db, nextid
        key = self._parse_id(id)
        if key is not None and key in db:
            db[key] = json.loads(web.data())
            return json.dumps({'updated': key})
        return web.notfound()
def run_server():
    """Launch the web.py application in a background thread.

    Returns the started Thread so the caller can join() it later.
    """
    worker = threading.Thread(target=app.run)
    worker.start()
    return worker
| gpl-3.0 |
Ghini/ghini.desktop | bauble/db.py | 1 | 30371 | # -*- coding: utf-8 -*-
#
# Copyright 2005-2010 Brett Adams <brett@belizebotanic.org>
# Copyright 2015-2017 Mario Frasca <mario@anche.no>.
# Copyright 2017 Jardín Botánico de Quito
# Copyright 2018 Ilja Everilä
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
import gi
gi.require_version('Gtk', '3.0')
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
from sqlalchemy.orm import class_mapper
import datetime
import os
import re
import bauble.error as error
import json
from bauble.utils import parse_date
# Import SQLAlchemy and refuse to run against a version older than 0.6.
try:
    import sqlalchemy as sa
    # Compare only (major, minor); micro releases are irrelevant here.
    parts = tuple(int(i) for i in sa.__version__.split('.')[:2])
    if parts < (0, 6):
        # BUG FIX: '.'.join(parts) raised TypeError because `parts` is a
        # tuple of ints; stringify each component before joining.
        msg = _('This version of Ghini requires SQLAlchemy 0.6 or greater. '
                'You are using version %s. '
                'Please download and install a newer version of SQLAlchemy '
                'from http://www.sqlalchemy.org or contact your system '
                'administrator.') % '.'.join(str(p) for p in parts)
        raise error.SQLAlchemyVersionError(msg)
except ImportError:
    msg = _('SQLAlchemy not installed. Please install SQLAlchemy from '
            'http://www.sqlalchemy.org')
    raise
from gi.repository import Gtk
import sqlalchemy.orm as orm
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
import bauble.btypes as types
import bauble.utils as utils
def sqlalchemy_debug(verbose):
    """Toggle SQLAlchemy's loggers between chatty and quiet levels.

    When verbose, the engine logs SQL (INFO) and the unit-of-work logs at
    DEBUG; otherwise both loggers are raised to WARN.
    """
    if verbose:
        levels = {'sqlalchemy.engine': logging.INFO,
                  'sqlalchemy.orm.unitofwork': logging.DEBUG}
    else:
        levels = {'sqlalchemy.engine': logging.WARN,
                  'sqlalchemy.orm.unitofwork': logging.WARN}
    for logger_name, level in levels.items():
        logging.getLogger(logger_name).setLevel(level)

SQLALCHEMY_DEBUG = False
sqlalchemy_debug(SQLALCHEMY_DEBUG)
def get_or_create(session, model, **kwargs):
    """Return the first ``model`` row matching ``kwargs``, creating one if absent.

    A newly created instance is added to the session and flushed so its
    database identity is populated before it is returned.
    """
    existing = session.query(model).filter_by(**kwargs).first()
    if existing:
        return existing
    fresh = model(**kwargs)
    session.add(fresh)
    session.flush()
    return fresh
def natsort(attr, obj):
    """Return the items of ``obj``'s attribute in natural sort order.

    ``attr`` may be a dotted path (e.g. ``'species.accessions'``); each
    intermediate step must yield a single object and the final step a
    sequence.  The attribute comes first so the function curries nicely::

        from functools import partial
        partial(natsort, 'accessions')(species)
        partial(natsort, 'species.accessions')(vern_name)
    """
    from bauble import utils
    target = obj
    for step in attr.split('.'):
        target = getattr(target, step)
    return sorted(target, key=utils.natsort_key)
class HistoryExtension(orm.MapperExtension):
    """
    HistoryExtension is a
    :class:`~sqlalchemy.orm.interfaces.MapperExtension` that is added
    to all classes that inherit from bauble.db.Base so that all
    inserts, updates, and deletes made to the mapped objects are
    recorded in the `history` table.
    """
    def _add(self, operation, mapper, connection, instance):
        """
        Add a new entry to the history table.

        :param operation: one of 'insert', 'update' or 'delete'.
        :param instance: the mapped object whose column values are recorded.
        """
        user = current_user()
        # Snapshot every mapped column of the instance as UTF-8 text.
        row = {}
        for c in mapper.local_table.c:
            row[c.name] = utils.utf8(getattr(instance, c.name))
        table = History.__table__
        # Execute on the event's connection so the history row is written in
        # the same transaction as the change it records.
        stmt = table.insert(dict(table_name=mapper.local_table.name,
                                 table_id=instance.id, values=str(row),
                                 operation=operation, user=user,
                                 timestamp=datetime.datetime.today()))
        connection.execute(stmt)

    def after_update(self, mapper, connection, instance):
        self._add('update', mapper, connection, instance)

    def after_insert(self, mapper, connection, instance):
        self._add('insert', mapper, connection, instance)

    def after_delete(self, mapper, connection, instance):
        self._add('delete', mapper, connection, instance)
class MapperBase(DeclarativeMeta):
    """
    MapperBase adds the id, _created and _last_updated columns to all
    tables.

    In general there is no reason to use this class directly other
    than to extend it to add more default columns to all the bauble
    tables.
    """
    def __init__(cls, classname, bases, dict_):
        # Only concrete tables (those declaring __tablename__) get the
        # standard columns; abstract bases are left untouched.
        if '__tablename__' in dict_:
            cls.id = sa.Column('id', sa.Integer, primary_key=True,
                               autoincrement=True)
            cls._created = sa.Column('_created', types.DateTime(timezone=True),
                                     default=sa.func.now())
            cls._last_updated = sa.Column('_last_updated',
                                          types.DateTime(timezone=True),
                                          default=sa.func.now(),
                                          onupdate=sa.func.now())
            # Every concrete mapped class records its changes via the
            # HistoryExtension defined above.
            cls.__mapper_args__ = {'extension': HistoryExtension()}
        # Provide default implementations unless the class defines its own.
        if 'top_level_count' not in dict_:
            cls.top_level_count = lambda x: {classname: 1}
        if 'search_view_markup_pair' not in dict_:
            # Default (main, detail) markup pair used by the search view.
            cls.search_view_markup_pair = lambda x: (
                utils.xml_safe(str(x)),
                '(%s)' % type(x).__name__)
        super().__init__(classname, bases, dict_)
engine = None
"""A :class:`sqlalchemy.engine.base.Engine` used as the default
connection to the database.
"""
Session = None
"""
bauble.db.Session is created after the database has been opened with
:func:`bauble.db.open()`. bauble.db.Session should be used when you need
to do ORM based activities on a bauble database. To create a new
Session use::
session = bauble.db.Session()
When you are finished with the session be sure to close the session
with :func:`session.close()`. Failure to close sessions can lead to
database deadlocks, particularly when using PostgreSQL based
databases.
"""
Base = declarative_base(metaclass=MapperBase)
"""
All tables/mappers in Ghini which use the SQLAlchemy declarative
plugin for declaring tables and mappers should derive from this class.
An instance of :class:`sqlalchemy.ext.declarative.Base`
"""
metadata = Base.metadata
"""The default metadata for all Ghini tables.
An instance of :class:`sqlalchemy.schema.Metadata`
"""
history_base = declarative_base(metadata=metadata)
class History(history_base):
    """
    The history table records every change made to every table that
    inherits from :ref:`Base`

    :Table name: history

    :Columns:
      id: :class:`sqlalchemy.types.Integer`
        A unique identifier.
      table_name: :class:`sqlalchemy.types.String`
        The name of the table the change was made on.
      table_id: :class:`sqlalchemy.types.Integer`
        The id in the table of the row that was changed.
      values: :class:`sqlalchemy.types.String`
        The changed values.
      operation: :class:`sqlalchemy.types.String`
        The type of change. This is usually one of insert, update or delete.
      user: :class:`sqlalchemy.types.String`
        The name of the user who made the change.
      timestamp: :class:`sqlalchemy.types.DateTime`
        When the change was made.
    """
    __tablename__ = 'history'
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    table_name = sa.Column(sa.Text, nullable=False)
    # autoincrement=False: this column stores the changed row's own id,
    # not a generated sequence value.
    table_id = sa.Column(sa.Integer, nullable=False, autoincrement=False)
    values = sa.Column(sa.Text, nullable=False)
    operation = sa.Column(sa.Text, nullable=False)
    # user may be NULL when no database/system user could be determined.
    user = sa.Column(sa.Text)
    timestamp = sa.Column(types.DateTime, nullable=False)
def open(uri, verify=True, show_error_dialogs=False):
    """
    Open a database connection. This function sets bauble.db.engine to
    the opened engine.

    Return bauble.db.engine if successful else returns None and
    bauble.db.engine remains unchanged.

    :param uri: The URI of the database to open.
    :type uri: str

    :param verify: Whether the database we connect to should be verified
        as one created by Ghini. This flag is used mostly for
        testing.
    :type verify: bool

    :param show_error_dialogs: A flag to indicate whether the error
        dialogs should be displayed. This is used mostly for testing.
    :type show_error_dialogs: bool
    """
    # ** WARNING: this can print your passwd
    logger.debug('db.open(%s)' % uri)
    from sqlalchemy.orm import sessionmaker, scoped_session
    global engine
    new_engine = None

    import sqlalchemy.pool
    import bauble.prefs
    if bauble.prefs.testing:  # this causes trouble in production but works
        # in testing. who can explain? #133, #425
        new_engine = sa.create_engine(uri, echo=SQLALCHEMY_DEBUG,
                                      implicit_returning=False,
                                      poolclass=sqlalchemy.pool.SingletonThreadPool,
                                      pool_size=20)
    else:  # this works in production but totally prevents tests from
        # running. who can explain? #133, #425
        new_engine = sa.create_engine(uri, echo=SQLALCHEMY_DEBUG,
                                      implicit_returning=False,
                                      poolclass=sqlalchemy.pool.NullPool)
    # TODO: there is a problem here: the code may cause an exception, but we
    # immediately loose the 'new_engine', which should know about the
    # encoding used in the exception string.
    try:
        new_engine.connect().close()  # make sure we can connect
    except Exception:
        logger.info('about to forget about encoding of exception text.')
        raise

    def _bind():
        """bind metadata to engine and create sessionmaker """
        global Session, engine
        # Release any previously bound engine before replacing it.
        if engine is not None:
            engine.dispose()
        engine = new_engine
        metadata.bind = engine  # make engine implicit for metadata

        def temp():
            import inspect
            logger.debug('creating session %s' % str(inspect.stack()[1]))
            return scoped_session(sessionmaker(bind=engine, autoflush=False))()
        Session = scoped_session(sessionmaker(bind=engine, autoflush=False))
        # NOTE(review): Session is immediately rebound to `temp`, so every
        # Session() call builds a fresh scoped_session and the assignment
        # just above is effectively dead -- confirm the indirection exists
        # only for the caller-stack debug logging in temp().
        Session = temp

    # Skip verification entirely when the caller asked for it.
    if new_engine is not None and not verify:
        _bind()
        return engine
    elif new_engine is None:
        return None

    verify_connection(new_engine, show_error_dialogs)
    _bind()
    return engine
def create(import_defaults=True):
    """
    Create new Ghini database at the current connection

    :param import_defaults: A flag that is passed to each plugins
        install() method to indicate where it should import its
        default data.  This is mainly used for testing.  The default
        value is True
    :type import_defaults: bool

    :raises ValueError: if no engine is currently connected.
    """
    logger.debug('entered db.create()')
    if not engine:
        raise ValueError('engine is None, not connected to a database')
    import bauble
    import bauble.meta as meta
    from bauble import pluginmgr
    import datetime

    # First transaction: (re)create the schema and seed the meta table.
    connection = engine.connect()
    transaction = connection.begin()
    try:
        # TODO: here we are dropping/creating all the tables in the
        # metadata whether they are in the registry or not, we should
        # really only be creating those tables from registered
        # plugins, maybe with an uninstall() method on Plugin
        metadata.drop_all(bind=connection, checkfirst=True)
        metadata.create_all(bind=connection)

        # fill in the bauble meta table and install all the plugins
        meta_table = meta.BaubleMeta.__table__
        meta_table.insert(bind=connection).\
            execute(name=meta.VERSION_KEY,
                    value=str(bauble.version)).close()
        import time
        # Record the creation time with the local UTC offset attached.
        tzlocal = datetime.timezone(-datetime.timedelta(hours=time.timezone/60/60))
        meta_table.insert(bind=connection).\
            execute(name=meta.CREATED_KEY,
                    value=str(datetime.datetime.now(tz=tzlocal))).close()
    except GeneratorExit as e:
        # this is here in case the main windows is closed in the middle
        # of a task
        # UPDATE 2009.06.18: i'm not sure if this is still relevant since we
        # switched the task system to use fibra...but it doesn't hurt
        # having it here until we can make sure
        logger.warning('bauble.db.create(): %s' % utils.utf8(e))
        transaction.rollback()
        raise
    except Exception as e:
        logger.warning('bauble.db.create(): %s' % utils.utf8(e))
        transaction.rollback()
        raise
    else:
        transaction.commit()
    finally:
        connection.close()

    # Second transaction: install the plugins separately so a plugin
    # failure does not roll back the schema creation above.
    connection = engine.connect()
    transaction = connection.begin()
    try:
        pluginmgr.install('all', import_defaults, force=True)
    except GeneratorExit as e:
        # this is here in case the main windows is closed in the middle
        # of a task
        # UPDATE 2009.06.18: i'm not sure if this is still relevant since we
        # switched the task system to use fibra...but it doesn't hurt
        # having it here until we can make sure
        logger.warning('bauble.db.create(): %s' % utils.utf8(e))
        transaction.rollback()
        raise
    except Exception as e:
        logger.warning('bauble.db.create(): %s' % utils.utf8(e))
        transaction.rollback()
        raise
    else:
        transaction.commit()
    finally:
        connection.close()
def verify_connection(engine, show_error_dialogs=False):
    """
    Test whether a connection to an engine is a valid Ghini database. This
    method will raise an error for the first problem it finds with the
    database.

    :param engine: the engine to test
    :type engine: :class:`sqlalchemy.engine.Engine`
    :param show_error_dialogs: flag for whether or not to show message
        dialogs detailing the error, default=False
    :type show_error_dialogs: bool
    """
    logger.debug('entered verify_connection(%s)' % show_error_dialogs)
    import bauble
    if show_error_dialogs:
        # Re-run the checks silently and translate each failure into a
        # message dialog before re-raising the original error.
        try:
            return verify_connection(engine, False)
        except error.EmptyDatabaseError:
            msg = _('The database you have connected to is empty.')
            utils.message_dialog(msg, Gtk.MessageType.ERROR)
            raise
        except error.MetaTableError:
            msg = _('The database you have connected to does not have the '
                    'bauble meta table. This usually means that the database '
                    'is either corrupt or it was created with an old version '
                    'of Ghini')
            utils.message_dialog(msg, Gtk.MessageType.ERROR)
            raise
        except error.TimestampError:
            msg = _('The database you have connected to does not have a '
                    'timestamp for when it was created. This usually means '
                    'that there was a problem when you created the '
                    'database or the database you connected to wasn\'t '
                    'created with Ghini.')
            utils.message_dialog(msg, Gtk.MessageType.ERROR)
            raise
        except error.VersionError as e:
            msg = (_('You are using Ghini version %(version)s while the '
                     'database you have connected to was created with '
                     'version %(db_version)s\n\nSome things might not work as '
                     'or some of your data may become unexpectedly '
                     'corrupted.') %
                   {'version': bauble.version,
                    'db_version': '%s' % e.version})
            utils.message_dialog(msg, Gtk.MessageType.ERROR)
            raise

    # check if the database has any tables
    if len(engine.table_names()) == 0:
        raise error.EmptyDatabaseError()

    import bauble.meta as meta
    # check that the database we connected to has the bauble meta table
    if not engine.has_table(meta.BaubleMeta.__tablename__):
        raise error.MetaTableError()

    from sqlalchemy.orm import sessionmaker
    # if we don't close this session before raising an exception then we
    # will probably get deadlocks....i'm not really sure why
    session = sessionmaker(bind=engine)()
    query = session.query  # (meta.BaubleMeta)

    # check that the database we connected to has a "created" timestamp
    # in the bauble meta table. we're not using the value though.
    result = query(meta.BaubleMeta).filter_by(name=meta.CREATED_KEY).first()
    if not result:
        session.close()
        raise error.TimestampError()

    # check that the database we connected to has a "version" in the bauble
    # meta table and the the major and minor version are the same
    result = query(meta.BaubleMeta).filter_by(name=meta.VERSION_KEY).first()
    if not result:
        session.close()
        raise error.VersionError(None)
    try:
        major, minor, revision = result.value.split('.')
    except Exception:
        session.close()
        raise error.VersionError(result.value)

    # Only (major, minor) must match the running application's version.
    if major != bauble.version_tuple[0] or minor != bauble.version_tuple[1]:
        session.close()
        raise error.VersionError(result.value)

    session.close()
    return True
def make_note_class(name, compute_serializable_fields=None, as_dict=None, retrieve=None):
    """Build and return a ``<Name>Note`` declarative class for table
    ``<name>_note``, related back to the ``<Name>`` class via a ``notes``
    backref with delete-orphan cascade.

    :param name: the owning class name (e.g. 'Accession').
    :param compute_serializable_fields: optional classmethod body; when
        given, the note class also inherits from Serializable.
    :param as_dict: optional replacement for the default as_dict method.
    :param retrieve: optional replacement for the default retrieve method.
    """
    class_name = str(name + 'Note')
    table_name = name.lower() + '_note'

    def is_defined(self):
        # A note counts as defined only when all three parts are present.
        return bool(self.user and self.category and self.note)

    def is_empty(self):
        # NOTE(review): defined but never added to `fields` below, so it is
        # not exposed on the generated class -- confirm whether intended.
        return not self.user and not self.category and not self.note

    def retrieve_or_create(cls, session, keys,
                           create=True, update=True):
        """return database object corresponding to keys
        """
        category = keys.get('category', '')
        # normally, it's one note per category, but for list values, and for
        # pictures, we can have more than one.
        if (create and (category.startswith('[') and category.endswith(']') or
                        category == '<picture>')):
            # dirty trick: making sure it's not going to be found!
            import uuid
            keys['category'] = str(uuid.uuid4())
        result = super(globals()[class_name], cls).retrieve_or_create(session, keys, create, update)
        # Restore the real category on both the keys dict and the result.
        keys['category'] = category
        if result:
            result.category = category
        return result

    def retrieve_default(cls, session, keys):
        # Filter on the owner's code, the date and the category, each only
        # when present in `keys`; expect exactly one match.
        q = session.query(cls)
        if name.lower() in keys:
            q = q.join(globals()[name]).filter(
                globals()[name].code == keys[name.lower()])
        if 'date' in keys:
            q = q.filter(cls.date == keys['date'])
        if 'category' in keys:
            q = q.filter(cls.category == keys['category'])
        try:
            return q.one()
        except:
            return None

    def as_dict_default(self):
        # NOTE(review): `db.Serializable` -- this module *is* bauble.db, so
        # this relies on a module-level name `db` being available when the
        # generated method runs; verify against the importing code.
        result = db.Serializable.as_dict(self)
        result[name.lower()] = getattr(self, name.lower()).code
        return result

    as_dict = as_dict or as_dict_default
    retrieve = retrieve or retrieve_default

    bases = (Base, )
    fields = {'__tablename__': table_name,
              '__mapper_args__': {'order_by': table_name + '.date'},
              'date': sa.Column(types.Date, default=sa.func.now()),
              'user': sa.Column(sa.Unicode(64), default=''),
              'category': sa.Column(sa.Unicode(32), default=''),
              'type': sa.Column(sa.Unicode(32), default=''),
              'note': sa.Column(sa.UnicodeText, nullable=False),
              name.lower() + '_id': sa.Column(sa.Integer, sa.ForeignKey(name.lower() + '.id'), nullable=False),
              name.lower(): sa.orm.relation(name, uselist=False, backref=sa.orm.backref(
                  'notes', cascade='all, delete-orphan')),
              'retrieve': classmethod(retrieve),
              'retrieve_or_create': classmethod(retrieve_or_create),
              'is_defined': is_defined,
              'as_dict': as_dict,
              }
    if compute_serializable_fields is not None:
        bases = (Base, Serializable)
        fields['compute_serializable_fields'] = classmethod(compute_serializable_fields)
    result = type(class_name, bases, fields)
    return result
class WithNotes:
    """Mixin exposing specially-categorized notes as virtual attributes."""

    # Matches "{<name>:<key>}" categories and captures the key part.
    key_pattern = re.compile(r'{[^:]+:(.*)}')

    def __getattr__(self, name):
        '''retrieve value from corresponding note(s)

        the result can be an atomic value, a list, or a dictionary:
        "[name]" notes accumulate into a list, "{name:key}" notes into a
        dict, and a "<name>" note holds a single JSON-ish value.
        '''
        if name.startswith('_sa'):  # it's a SA field, don't even try to look it up
            raise AttributeError(name)
        result = []
        is_dict = False
        for n in self.notes:
            if n.category is None:
                pass
            elif n.category == ('[%s]' % name):
                # list-valued: collect each note's text.
                result.append(n.note)
            elif n.category.startswith('{%s:' % name) and n.category.endswith('}'):
                # dict-valued: collect (key, text) pairs.
                is_dict = True
                match = self.key_pattern.match(n.category)
                key = match.group(1)
                result.append((key, n.note))
            elif n.category == ('<%s>' % name):
                # atomic value: try to parse as JSON, quoting bare keys and
                # allowing ';' as a stand-in for ','; first wrapped in {}
                # then as-is; fall back to the literal text.
                try:
                    return json.loads(re.sub(r'(\w+)[ ]*(?=:)', r'"\g<1>"', '{' + n.note.replace(';', ',') + '}'))
                except Exception as e:
                    pass
                try:
                    return json.loads(re.sub(r'(\w+)[ ]*(?=:)', r'"\g<1>"', n.note))
                except Exception as e:
                    logger.debug('not parsed %s(%s), returning literal text »%s«', type(e), e, n.note)
                    return n.note
        if result == []:
            # if nothing was found, do not break the proxy.
            raise AttributeError(name)
        if is_dict:
            return dict(result)
        return result
class DefiningPictures:

    @property
    def pictures(self):
        '''Return a list of Gtk.VBox containers, one per "<picture>" note.

        Each box is populated asynchronously by an ImageLoader and ends up
        holding either the image or an error message.
        '''
        boxes = []
        for note in self.notes:
            if note.category == '<picture>':
                container = Gtk.VBox()  # receives the image or the error message
                utils.ImageLoader(container, note.note).start()
                boxes.append(container)
        return boxes
class Serializable:
    """Mixin giving mapped classes a dict round-trip: `as_dict` exports a
    plain dictionary tagged with the snake_cased class name, and
    `retrieve_or_create` imports/merges such a dictionary back.

    Subclasses may override ``link_keys`` (names of python-side association
    properties), ``correct_field_names``, ``compute_serializable_fields``
    and ``retrieve``.
    """
    import re
    single_cap_re = re.compile('([A-Z])')
    link_keys = []

    def as_dict(self):
        # Export all plain, non-empty columns, skipping the primary key,
        # private columns and foreign keys; tag with the object type.
        result = dict((col, getattr(self, col))
                      for col in list(self.__table__.columns.keys())
                      if col not in ['id']
                      and col[0] != '_'
                      and getattr(self, col) is not None
                      and getattr(self, col) != ''
                      and not col.endswith('_id'))
        result['object'] = self.single_cap_re.sub(
            r'_\1', self.__class__.__name__).lower()[1:]
        return result

    @classmethod
    def correct_field_names(cls, keys):
        """correct keys dictionary according to class attributes

        exchange format may use different keys than class attributes
        """
        pass

    @classmethod
    def compute_serializable_fields(cls, session, keys):
        """create objects corresponding to keys (class dependent)
        """
        return {}

    @classmethod
    def retrieve_or_create(cls, session, keys,
                           create=True, update=True):
        """return database object corresponding to keys

        Depending on the `create`/`update` flags, either fetch, create or
        merge the object described by the `keys` dict; returns None when
        the object is absent and must not be created, or when a required
        linked object cannot be resolved.
        """
        logger.debug('initial value of keys: %s' % keys)
        ## first try retrieving
        is_in_session = cls.retrieve(session, keys)
        logger.debug('2 value of keys: %s' % keys)
        if not create and not is_in_session:
            logger.debug('not creating from %s; returning None (1)' % str(keys))
            return None
        if is_in_session and not update:
            logger.debug("returning not updated existing %s" % is_in_session)
            return is_in_session
        try:
            ## some fields are given as text but actually correspond to
            ## different fields and should be associated to objects
            extradict = cls.compute_serializable_fields(
                session, keys)
            ## what fields must be corrected
            cls.correct_field_names(keys)
        except error.NoResultException:
            if not is_in_session:
                logger.debug("returning None (2)")
                return None
            else:
                extradict = {}
        except Exception as e:
            logger.debug("this was unexpected")
            raise
        logger.debug('3 value of keys: %s' % keys)

        # correct any timestamp, parsing it as good as possible
        for k in ['_created', '_last_updated']:
            if k in keys:
                keys[k] = parse_date(keys[k])
        logger.debug('3½ value of keys: %s' % keys)

        ## at this point, resulting object is either in database or not. in
        ## either case, the database is going to be updated.

        ## link_keys are python-side properties, not database associations
        ## and have as value objects that are possibly in the database, or
        ## not, but they cannot be used to construct the `self` object.
        link_values = {}
        for k in cls.link_keys:
            if keys.get(k):
                link_values[k] = keys[k]
        logger.debug("link_values : %s" % str(link_values))
        # Drop any key that is not a mapped column, plus the primary key.
        for k in list(keys.keys()):
            if k not in class_mapper(cls).mapped_table.c:
                del keys[k]
        if 'id' in keys:
            del keys['id']
        logger.debug('4 value of keys: %s' % keys)
        keys.update(extradict)
        logger.debug('5 value of keys: %s' % keys)

        # early construct object before building links
        if not is_in_session and create:
            ## completing the task of building the links
            logger.debug("links? %s, %s" % (cls.link_keys, list(keys.keys())))
            for key in cls.link_keys:
                d = link_values.get(key)
                if d is None:
                    continue
                logger.debug('recursive call to construct_from_dict %s' % d)
                obj = construct_from_dict(session, d)
                keys[key] = obj
            logger.debug("going to create new %s with %s" % (cls, keys))
            result = cls(**keys)
            session.add(result)

        # or possibly reuse existing object
        if is_in_session and update:
            result = is_in_session
            ## completing the task of building the links
            logger.debug("links? %s, %s" % (cls.link_keys, list(keys.keys())))
            for key in cls.link_keys:
                d = link_values.get(key)
                if d is None:
                    continue
                logger.debug('recursive call to construct_from_dict %s' % d)
                obj = construct_from_dict(session, d)
                keys[key] = obj
            logger.debug("going to update %s with %s" % (result, keys))
            if 'id' in keys:
                del keys['id']
            for k, v in list(keys.items()):
                if isinstance(v, dict):
                    if v.get('__class__') == 'datetime':
                        # Decode {'__class__': 'datetime', 'millis': m}.
                        m = v.get('millis', 0)
                        # BUG FIX: the epoch is 1970-01-01; the original
                        # read datetime(1970, 1, 12), an 11-day offset.
                        v = datetime.datetime(1970, 1, 1)
                        # NOTE(review): timedelta(0, m) treats m as
                        # *seconds* while the key name suggests
                        # milliseconds -- confirm the producer's units
                        # before changing this.
                        v = v + datetime.timedelta(0, m)
                    else:
                        # Unknown dict-encoded value: skip the assignment.
                        v = None
                if v is not None:
                    setattr(result, k, v)
            logger.debug('returning updated existing %s' % result)

        session.flush()
        logger.debug('returning new %s' % result)
        return result
def construct_from_dict(session, obj, create=True, update=True):
    ## resolve the implementing class, then delegate construction to it
    logger.debug("construct_from_dict %s" % obj)
    klass = class_of_object(obj['object']) if 'object' in obj else None
    if klass is None and 'rank' in obj:
        # fall back on the 'rank' key; it is consumed (removed) here
        rank = obj.pop('rank')
        klass = globals().get(rank.capitalize())
    return klass.retrieve_or_create(session, obj, create=create, update=update)
def class_of_object(o):
    """Return the class implementing object ``o``.

    The underscore-separated object name is converted to CamelCase, then
    looked up among this module's globals first, and among the classes
    provided by plugins otherwise.
    """
    camel_name = ''.join(part.capitalize() for part in o.split('_'))
    cls = globals().get(camel_name)
    if cls is not None:
        return cls
    from bauble import pluginmgr
    return pluginmgr.provided.get(camel_name)
class current_user_functor:
    """implement the current_user function, and allow overriding.

    invoke the current_user object as a function to get the user name.
    invoke current_user.override(user_name) to set user name.
    invoke current_user.override() to reset.
    """
    def __init__(self):
        # when not None, this value is returned instead of querying the db
        self.override_value = None

    def override(self, value=None):
        """Force the name returned by __call__; call with no argument to reset."""
        self.override_value = value

    def __call__(self):
        '''return current user name: from database, or system
        '''
        if self.override_value:
            return self.override_value
        try:
            if engine.name.startswith('postgresql'):
                r = engine.execute('select current_user;')
                user = r.fetchone()[0]
                r.close()
            elif engine.name.startswith('mysql'):
                r = engine.execute('select current_user();')
                user = r.fetchone()[0]
                r.close()
            else:
                raise TypeError()
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers unsupported engines,
        # connection errors and an undefined `engine`.
        except Exception:
            logger.debug("retrieving user name from system")
            user = (os.getenv('USER') or os.getenv('USERNAME') or
                    os.getenv('LOGNAME') or os.getenv('LNAME'))
        return user
# Module-level singleton: invoke current_user() to obtain the active user
# name (see current_user_functor for the override mechanism).
current_user = current_user_functor()
| gpl-2.0 |
caisq/tensorflow | tensorflow/python/debug/lib/session_debug_multi_gpu_test.py | 85 | 3476 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities under multiple (i.e., >1) GPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SessionDebugMultiGPUTest(test_util.TensorFlowTestCase):
  """Tests tfdbg dumping for graphs whose ops span multiple GPUs."""
  def setUp(self):
    # Fresh temporary directory to receive the debug dumps of each test.
    self._dump_root = tempfile.mkdtemp()
  def tearDown(self):
    ops.reset_default_graph()
    # Tear down temporary dump directory.
    if os.path.isdir(self._dump_root):
      shutil.rmtree(self._dump_root)
  def testMultiGPUSessionRun(self):
    """Place ops on two distinct GPUs and verify the dumped tensor values."""
    local_devices = device_lib.list_local_devices()
    gpu_device_names = []
    for device in local_devices:
      if device.device_type == "GPU":
        gpu_device_names.append(device.name)
    # Sort for a deterministic device assignment across runs.
    gpu_device_names = sorted(gpu_device_names)
    if len(gpu_device_names) < 2:
      self.skipTest(
          "This test requires at least 2 GPUs, but only %d is available." %
          len(gpu_device_names))
    with session.Session() as sess:
      v = variables.Variable([10.0, 15.0], dtype=dtypes.float32, name="v")
      with ops.device(gpu_device_names[0]):
        u0 = math_ops.add(v, v, name="u0")
      with ops.device(gpu_device_names[1]):
        u1 = math_ops.multiply(v, v, name="u1")
      # w = v * v - (v + v); with v = [10, 15] this is [80, 195].
      w = math_ops.subtract(u1, u0, name="w")
      sess.run(v.initializer)
      # partition graphs are needed below to load the dump directory.
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(run_options, sess.graph,
                              debug_urls="file://" + self._dump_root)
      run_metadata = config_pb2.RunMetadata()
      self.assertAllClose(
          [80.0, 195.0],
          sess.run(w, options=run_options, run_metadata=run_metadata))
      debug_dump_dir = debug_data.DebugDumpDir(
          self._dump_root, partition_graphs=run_metadata.partition_graphs)
      # Expect dumps from three devices: the two GPUs plus the CPU
      # (where the variable lives) — TODO confirm the CPU assumption.
      self.assertEqual(3, len(debug_dump_dir.devices()))
      self.assertAllClose(
          [10.0, 15.0], debug_dump_dir.get_tensors("v", 0, "DebugIdentity")[0])
      self.assertAllClose(
          [20.0, 30.0], debug_dump_dir.get_tensors("u0", 0, "DebugIdentity")[0])
      self.assertAllClose(
          [100.0, 225.0],
          debug_dump_dir.get_tensors("u1", 0, "DebugIdentity")[0])
if __name__ == "__main__":
  # Run the test suite when this file is executed directly.
  googletest.main()
| apache-2.0 |
Alignak-monitoring-contrib/alignak-module-log | alignak_module_logs/logs.py | 2 | 26149 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2018: Alignak contrib team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak contrib project.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
"""
This module is an Alignak Broker module that collects the `monitoring_log` broks to send
them to a Python logger configured in the module configuration file
"""
import os
import json
import time
import queue
import logging
from logging import Formatter
from logging.handlers import TimedRotatingFileHandler
from logging.config import dictConfig as logger_dictConfig
import psutil
from alignak.stats import Stats
from alignak.basemodule import BaseModule
from alignak_backend_client.client import Backend, BackendException
from alignak_module_logs.logevent import LogEvent
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# Detach any StreamHandler installed on the parent logger — presumably to
# avoid duplicated console output from this module; confirm against the
# daemon's logging setup before changing.
for handler in logger.parent.handlers:
    if isinstance(handler, logging.StreamHandler):
        logger.parent.removeHandler(handler)
# pylint: disable=invalid-name
# Module declaration read by the Alignak modules manager.
properties = {
    # this module is to be loaded by the broker daemon
    'daemons': ['broker'],
    'type': 'logs',
    # runs as an external (separate) process
    'external': True,
    'phases': ['running'],
}
class UTCFormatter(logging.Formatter):
    """Formatter that renders log record timestamps in UTC.

    logging.Formatter.formatTime() calls self.converter(record.created);
    pointing the converter at time.gmtime yields UTC struct_time values
    instead of local time.
    """
    converter = time.gmtime
def get_instance(mod_conf):
    """Build and return the module instance for the modules manager.

    :param mod_conf: the module properties as defined globally in this file
    :return: a MonitoringLogsCollector instance
    """
    instance = MonitoringLogsCollector(mod_conf)
    return instance
class MonitoringLogsCollector(BaseModule):
    """Monitoring logs module main class.

    Receives the `monitoring_log` broks from the broker, writes them to a
    dedicated Python logger and, when an Alignak backend is configured,
    posts the corresponding history events to the backend.
    """

    def __init__(self, mod_conf):
        # pylint: disable=global-statement
        """Module initialization

        mod_conf is a dictionary that contains:
        - all the variables declared in the module configuration file
        - a 'properties' value that is the module properties as defined globally in this file

        :param mod_conf: module configuration file as a dictionary
        """
        BaseModule.__init__(self, mod_conf)

        # Re-bind the module-level logger under a module-specific name so
        # that this module's own log lines are easily identifiable.
        global logger
        logger = logging.getLogger('alignak.module.%s' % self.alias)
        # Do not change log level for this module ...
        # logger.setLevel(getattr(mod_conf, 'log_level', logging.INFO))

        logger.debug("inner properties: %s", self.__dict__)
        logger.debug("received configuration: %s", mod_conf.__dict__)

        # Internal logger for the monitoring logs
        self.logger = None
        self.loop_count = 0

        # Self daemon monitoring (cpu, memory)
        self.daemon_monitoring = False
        self.daemon_monitoring_period = 10
        if 'ALIGNAK_DAEMON_MONITORING' in os.environ:
            self.daemon_monitoring = True
            try:
                self.daemon_monitoring_period = \
                    int(os.environ.get('ALIGNAK_DAEMON_MONITORING', '10'))
            except ValueError:  # pragma: no cover, simple protection
                pass
        if self.daemon_monitoring:
            print("Module self monitoring is enabled, reporting every %d loop count."
                  % self.daemon_monitoring_period)

        # Logger configuration file (environment variable takes precedence)
        self.logger_configuration = os.getenv('ALIGNAK_MONITORING_LOGS_CFG', None)
        if not self.logger_configuration:
            self.logger_configuration = getattr(mod_conf, 'logger_configuration', None)
        if self.logger_configuration and self.logger_configuration != \
                os.path.abspath(self.logger_configuration):
            self.logger_configuration = os.path.abspath(self.logger_configuration)

        # Logger default parameters (used if logger_configuration is not defined)
        self.default_configuration = True
        self.log_logger_name = getattr(mod_conf, 'log_logger_name', 'monitoring-logs')
        self.log_dir = getattr(mod_conf, 'log_dir', '/tmp')
        if "ALIGNAKLOG" in self.log_dir:
            self.log_dir = '/tmp'
        self.log_file = getattr(mod_conf, 'log_file', 'monitoring-logs.log')
        self.log_filename = os.path.join(self.log_dir, self.log_file)
        self.log_rotation_when = getattr(mod_conf, 'log_rotation_when', 'midnight')
        self.log_rotation_interval = int(getattr(mod_conf, 'log_rotation_interval', '1'))
        self.log_rotation_count = int(getattr(mod_conf, 'log_rotation_count', '365'))
        self.log_level = getattr(mod_conf, 'log_level', 'INFO')
        self.log_level = getattr(logging, self.log_level, None)
        # BUG FIX: the attribute was previously looked up as 'log_format '
        # (with a trailing space), so a configured log_format was always
        # silently ignored and the default used instead.
        self.log_format = getattr(mod_conf, 'log_format',
                                  '[%(created)i] %(levelname)s: %(message)s')
        self.log_date = getattr(mod_conf, 'log_date', '%Y-%m-%d %H:%M:%S %Z')

        if not self.logger_configuration and not self.log_dir and not self.log_file:
            logger.info("The logging feature is disabled")
        else:
            if self.logger_configuration:
                logger.info("logger configuration defined in %s", self.logger_configuration)
                self.default_configuration = False
                if not os.path.exists(self.logger_configuration):
                    # Fall back on the built-in defaults when the declared
                    # configuration file is missing.
                    self.default_configuration = True
                    logger.warning("defined logger configuration file (%s) does not exist! "
                                   "Using default configuration.", self.logger_configuration)
            if self.default_configuration:
                logger.info("logger default configuration:")
                logger.info(" - rotating logs in %s", self.log_filename)
                logger.info(" - log level: %s", self.log_level)
                logger.info(" - rotation every %d %s, keeping %s files",
                            self.log_rotation_interval, self.log_rotation_when,
                            self.log_rotation_count)
            self.setup_logging()

        # StatsD / Graphite metrics configuration. The *_enabled flags may
        # arrive either as strings ('0'/'1') or as booleans.
        stats_host = getattr(mod_conf, 'statsd_host', 'localhost')
        stats_port = int(getattr(mod_conf, 'statsd_port', '8125'))
        stats_prefix = getattr(mod_conf, 'statsd_prefix', 'alignak')
        statsd_enabled = (getattr(mod_conf, 'statsd_enabled', '0') != '0')
        if isinstance(getattr(mod_conf, 'statsd_enabled', '0'), bool):
            statsd_enabled = getattr(mod_conf, 'statsd_enabled')
        graphite_enabled = (getattr(mod_conf, 'graphite_enabled', '0') != '0')
        if isinstance(getattr(mod_conf, 'graphite_enabled', '0'), bool):
            graphite_enabled = getattr(mod_conf, 'graphite_enabled')
        logger.info("StatsD configuration: %s:%s, prefix: %s, enabled: %s, graphite: %s",
                    stats_host, stats_port, stats_prefix, statsd_enabled, graphite_enabled)

        self.statsmgr = Stats()
        # Configure our Stats manager
        if not graphite_enabled:
            self.statsmgr.register(self.alias, 'module',
                                   statsd_host=stats_host, statsd_port=stats_port,
                                   statsd_prefix=stats_prefix, statsd_enabled=statsd_enabled)
        else:
            self.statsmgr.connect(self.alias, 'module',
                                  host=stats_host, port=stats_port,
                                  prefix=stats_prefix, enabled=True)

        # Alignak Backend part
        # ---
        self.backend_available = False
        self.backend_connected = False
        self.backend_url = getattr(mod_conf, 'alignak_backend', '')
        if self.backend_url:
            logger.info("Alignak backend endpoint: %s", self.backend_url)

            self.client_processes = int(getattr(mod_conf, 'client_processes', '1'))
            logger.info("Number of processes used by backend client: %s", self.client_processes)

            self.backend_connected = False
            # timestamp before which no reconnection is attempted
            self.backend_connection_retry_planned = 0
            try:
                self.backend_connection_retry_delay = int(getattr(mod_conf,
                                                                  'backend_connection_retry_delay',
                                                                  '10'))
            except ValueError:
                self.backend_connection_retry_delay = 10
            self.backend_errors_count = 0

            self.backend_username = getattr(mod_conf, 'username', '')
            self.backend_password = getattr(mod_conf, 'password', '')
            self.backend_generate = getattr(mod_conf, 'allowgeneratetoken', False)
            self.backend_token = getattr(mod_conf, 'token', '')
            self.backend = Backend(self.backend_url, self.client_processes)

            if not self.backend.token and not self.backend_username:
                logger.warning("No Alignak backend credentials configured (empty token and "
                               "empty username. "
                               "The requested backend connection will not be available")
                self.backend_url = ''
            else:
                # Log in to the backend
                self.logged_in = False
                self.backend_connected = self.backend_connection()
                self.backend_available = self.backend_connected

                # Get the default realm
                self.default_realm = self.get_default_realm()
        else:
            logger.warning('Alignak Backend is not configured. '
                           'Some module features will not be available.')

    def init(self):
        """Handle this module "post" init ; just before it'll be started.

        Like just open necessaries file(s), database(s),
        or whatever the module will need.

        :return: None
        """
        return True

    def setup_logging(self):
        """Setup logging configuration.

        Either installs a default TimedRotatingFileHandler, or loads a
        dictConfig-style JSON configuration file, expanding the ALIGNAKLOG
        placeholder in declared file names with the configured log directory.

        :return: none
        """
        self.logger = logging.getLogger(self.log_logger_name)
        if self.default_configuration:
            # Set logger level
            self.logger.setLevel(self.log_level)
            logger.debug("Logger (default) handlers: %s", self.logger.handlers)
            if not self.logger.handlers:
                print("Log dir: %s" % self.log_dir)
                print("Log filename: %s" % self.log_filename)
                file_handler = TimedRotatingFileHandler(self.log_filename.replace("ALIGNAKLOG",
                                                                                  self.log_dir),
                                                        when=self.log_rotation_when,
                                                        interval=self.log_rotation_interval,
                                                        backupCount=self.log_rotation_count)
                file_handler.setFormatter(Formatter(self.log_format, self.log_date))
                self.logger.addHandler(file_handler)
                logger.debug("Logger (default), added a TimedRotatingFileHandler")
        else:
            try:
                with open(self.logger_configuration, 'rt') as my_logger_configuration_file:
                    config = json.load(my_logger_configuration_file)
                    # Update the declared log file names with the log directory
                    for hdlr in config['handlers']:
                        if 'filename' in config['handlers'][hdlr]:
                            config['handlers'][hdlr]['filename'] = \
                                config['handlers'][hdlr]['filename'].replace("ALIGNAKLOG",
                                                                             self.log_dir)
                logger_dictConfig(config)
            except ValueError as exp:
                logger.error("Logger configuration file is not parsable correctly!")
                logger.exception(exp)

    def backend_connection(self):
        """Backend connection to check live state update is allowed.

        Logs in, then fetches the user matching the current token and reads
        its `can_update_livestate` flag.

        :return: True/False
        """
        if self.backend_login():
            self.get_default_realm()
            try:
                start = time.time()
                params = {'where': '{"token":"%s"}' % self.backend.token}
                users = self.backend.get('user', params)
                self.statsmgr.counter('backend-get.user', 1)
                self.statsmgr.timer('backend-get-time.user', time.time() - start)
            except BackendException as exp:
                logger.warning("Error on backend when retrieving user information: %s", exp)
            else:
                try:
                    for item in users['_items']:
                        self.logged_in = item['can_update_livestate']
                        return self.logged_in
                except Exception as exp:
                    logger.error("Can't get the user information in the backend response: %s", exp)

        logger.error("Configured user account is not allowed for this module")
        return False

    def backend_login(self):
        """Log in to the backend.

        Uses the configured token directly when available, else logs in
        with username/password (optionally generating a token).

        :return: bool
        """
        generate = 'enabled'
        if not self.backend_generate:
            generate = 'disabled'

        if self.backend_token:
            # We have a token, don't ask for a new one
            self.backend.token = self.backend_token
            connected = True  # Not really yet, but assume yes
        else:
            if not self.backend_username or not self.backend_password:
                logger.error("No user or password supplied, and no default token defined. "
                             "Can't connect to backend")
                connected = False
            else:
                try:
                    start = time.time()
                    connected = self.backend.login(self.backend_username, self.backend_password,
                                                   generate)
                    self.statsmgr.counter('backend-login', 1)
                    self.statsmgr.timer('backend-login-time', time.time() - start)
                except BackendException as exp:
                    logger.error("Error on backend login: %s", exp)
                    connected = False

        return connected

    def get_default_realm(self):
        """Retrieves the default top level realm for the connected user.

        :return: str or None
        """
        default_realm = None
        if self.backend_connected:
            try:
                start = time.time()
                # lowest _level first: the first item is the top level realm
                result = self.backend.get('/realm', {'max_results': 1, 'sort': '_level'})
                self.statsmgr.counter('backend-get.realm', 1)
                self.statsmgr.timer('backend-get-time.realm', time.time() - start)
            except BackendException as exp:
                logger.warning("Error on backend when retrieving default realm: %s", exp)
            else:
                try:
                    default_realm = result['_items'][0]['_id']
                except Exception as exp:
                    logger.error("Can't get the default realm in the backend response: %s", exp)
        return default_realm

    def do_loop_turn(self):  # pragma: no cover
        """This function is present because of an abstract function in the BaseModule class"""
        logger.info("In loop")
        time.sleep(1)

    def manage_brok(self, brok):
        """We got the data to manage.

        Writes the monitoring log to the internal logger, then tries to
        parse it as a monitoring event and post it as a backend history item.

        :param brok: Brok object
        :type brok: object
        :return: False if the brok is ignored or a backend post error happens
        """
        # Ignore all except 'monitoring_log' broks...
        if brok.type not in ['monitoring_log']:
            return False

        level = brok.data['level'].lower()
        if level not in ['debug', 'info', 'warning', 'error', 'critical']:
            return False

        logger.debug("Got monitoring log brok: %s", brok)

        # Send to configured logger
        if self.logger:
            message = brok.data['message']
            # keep the log on a single line: escape CR/LF
            message = message.replace('\r', '\\r')
            message = message.replace('\n', '\\n')
            func = getattr(self.logger, level)
            func(message)

        if not self.backend_url:
            return False

        # Retry the backend connection when the retry delay has expired.
        # (The redundant int() around the boolean comparison was removed.)
        if not self.backend_connected and time.time() > self.backend_connection_retry_planned:
            self.backend_connected = self.backend_connection()

        if not self.backend_connected:
            logger.error("Alignak backend connection is not available. Ignoring event.")
            return False

        # Try to get a monitoring event
        try:
            event = LogEvent(('[%s] ' % int(time.time())) + brok.data['message'])
            if event.valid:
                # -------------------------------------------
                # Add an history event
                self.statsmgr.counter('monitoring-event-get.%s' % event.event_type, 1)
                data = {}
                if event.event_type == 'TIMEPERIOD':
                    data = {
                        "host_name": 'n/a',
                        "service_name": 'n/a',
                        "user_name": "Alignak",
                        "type": "monitoring.timeperiod_transition",
                        "message": brok.data['message'],
                    }
                if event.event_type == 'NOTIFICATION':
                    data = {
                        "host_name": event.data['hostname'],
                        "service_name": event.data['service_desc'] or 'n/a',
                        "user_name": "Alignak",
                        "type": "monitoring.notification",
                        "message": brok.data['message'],
                    }
                if event.event_type == 'ALERT':
                    data = {
                        "host_name": event.data['hostname'],
                        "service_name": event.data['service_desc'] or 'n/a',
                        "user_name": "Alignak",
                        "type": "monitoring.alert",
                        "message": brok.data['message'],
                    }
                if event.event_type == 'DOWNTIME':
                    downtime_type = "monitoring.downtime_start"
                    if event.data['state'] == 'STOPPED':
                        downtime_type = "monitoring.downtime_end"
                    if event.data['state'] == 'CANCELLED':
                        downtime_type = "monitoring.downtime_cancelled"
                    data = {
                        "host_name": event.data['hostname'],
                        "service_name": event.data['service_desc'] or 'n/a',
                        "user_name": "Alignak",
                        "type": downtime_type,
                        "message": brok.data['message'],
                    }
                if event.event_type == 'FLAPPING':
                    flapping_type = "monitoring.flapping_start"
                    if event.data['state'] == 'STOPPED':
                        flapping_type = "monitoring.flapping_stop"
                    data = {
                        "host_name": event.data['hostname'],
                        "service_name": event.data['service_desc'] or 'n/a',
                        "user_name": "Alignak",
                        "type": flapping_type,
                        "message": brok.data['message'],
                    }
                if event.event_type == 'COMMENT':
                    data = {
                        "host_name": event.data['hostname'],
                        "service_name": event.data['service_desc'] or 'n/a',
                        "user_name": event.data['author'] or 'Alignak',
                        "type": "webui.comment",
                        "message": event.data['comment'],
                    }
                if data:
                    try:
                        logger.debug("Posting history data: %s", data)
                        start = time.time()
                        self.backend.post('history', data)
                        self.statsmgr.counter('monitoring-event-stored.%s' % event.event_type, 1)
                        self.statsmgr.timer('backend-post-time.history', time.time() - start)
                    except BackendException as exp:
                        logger.exception("Exception: %s", exp)
                        logger.error("Exception response: %s", exp.response)
                        return False
                else:
                    self.statsmgr.counter('monitoring-event-ignored.%s' % event.event_type, 1)
                    logger.debug("Monitoring event not stored in the backend: %s",
                                 brok.data['message'])
            else:
                logger.warning("No monitoring event detected from: %s", brok.data['message'])
        except ValueError:
            logger.warning("Unable to decode a monitoring event from: %s", brok.data['message'])

        return True

    def main(self):
        """Main loop of the process.

        This module is an "external" module: it consumes the broks pushed
        into its queue and optionally reports its own process metrics.

        :return:
        """
        # Set the OS process title
        self.set_proctitle(self.alias)
        self.set_exit_handler()

        logger.info("starting...")

        # Increased on each loop turn
        self.loop_count = 0
        while not self.interrupted:
            # Increment loop count
            self.loop_count += 1

            try:
                queue_size = self.to_q.qsize()
                if queue_size:
                    logger.debug("queue length: %s", queue_size)
                    self.statsmgr.gauge('queue-size', queue_size)

                message = self.to_q.get_nowait()
                start = time.time()
                for brok in message:
                    # Prepare and manage each brok in the queue message
                    brok.prepare()
                    self.manage_brok(brok)

                logger.debug("time to manage %s broks (%d secs)", len(message), time.time() - start)
                self.statsmgr.timer('managed-broks-time', time.time() - start)
            except queue.Empty:
                # logger.debug("No message in the module queue")
                time.sleep(0.1)

            if self.daemon_monitoring and (self.loop_count
                                           % self.daemon_monitoring_period == 1):
                # Report this process' own cpu/memory metrics
                perfdatas = []
                my_process = psutil.Process()
                with my_process.oneshot():
                    perfdatas.append("num_threads=%d" % my_process.num_threads())
                    self.statsmgr.counter("num_threads", my_process.num_threads())
                    # perfdatas.append("num_ctx_switches=%d" % my_process.num_ctx_switches())
                    perfdatas.append("num_fds=%d" % my_process.num_fds())
                    # perfdatas.append("num_handles=%d" % my_process.num_handles())
                    perfdatas.append("create_time=%d" % my_process.create_time())
                    perfdatas.append("cpu_num=%d" % my_process.cpu_num())
                    self.statsmgr.counter("cpu_num", my_process.cpu_num())
                    perfdatas.append("cpu_usable=%d" % len(my_process.cpu_affinity()))
                    self.statsmgr.counter("cpu_usable", len(my_process.cpu_affinity()))
                    perfdatas.append("cpu_percent=%.2f%%" % my_process.cpu_percent())
                    self.statsmgr.counter("cpu_percent", my_process.cpu_percent())

                    cpu_times_percent = my_process.cpu_times()
                    for key in cpu_times_percent._fields:
                        perfdatas.append("cpu_%s_time=%.2fs"
                                         % (key, getattr(cpu_times_percent, key)))
                        self.statsmgr.counter("cpu_%s_time" % key,
                                              getattr(cpu_times_percent, key))

                    memory = my_process.memory_full_info()
                    for key in memory._fields:
                        perfdatas.append("mem_%s=%db" % (key, getattr(memory, key)))
                        self.statsmgr.counter("mem_%s" % key, getattr(memory, key))

                    logger.debug("Daemon %s (%s), pid=%s, ppid=%s, status=%s, cpu/memory|%s",
                                 self.name, my_process.name(),
                                 my_process.pid, my_process.ppid(),
                                 my_process.status(), " ".join(perfdatas))

        logger.info("stopping...")

        # Properly close all the Python logging stuff
        # See: http://stackoverflow.com/questions/24816456/python-logging-wont-shutdown
        logging.shutdown()
        logger.info("stopped")
| agpl-3.0 |
lukeiwanski/tensorflow | tensorflow/python/feature_column/feature_column_test.py | 4 | 264479 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column.feature_column import _CategoricalColumn
from tensorflow.python.feature_column.feature_column import _DenseColumn
from tensorflow.python.feature_column.feature_column import _FeatureColumn
from tensorflow.python.feature_column.feature_column import _LazyBuilder
from tensorflow.python.feature_column.feature_column import _LinearModel
from tensorflow.python.feature_column.feature_column import _transform_features
from tensorflow.python.feature_column.feature_column import InputLayer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
def _initialized_session(config=None):
  """Create a Session and run the global-variable and table initializers."""
  new_session = session.Session(config=config)
  for init_op in (variables_lib.global_variables_initializer(),
                  lookup_ops.tables_initializer()):
    new_session.run(init_op)
  return new_session
class LazyColumnTest(test.TestCase):
  """Unit tests for the _LazyBuilder transformation cache."""
  def test_transormations_called_once(self):
    # _LazyBuilder must memoize: _transform_feature runs at most once per
    # column no matter how many times get() is called.
    class TransformCounter(_FeatureColumn):
      def __init__(self):
        self.num_transform = 0
      @property
      def name(self):
        return 'TransformCounter'
      def _transform_feature(self, cache):
        self.num_transform += 1  # Count transform calls.
        return cache.get('a')
      @property
      def _parse_example_spec(self):
        pass
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    column = TransformCounter()
    self.assertEqual(0, column.num_transform)
    builder.get(column)
    self.assertEqual(1, column.num_transform)
    builder.get(column)
    self.assertEqual(1, column.num_transform)
  def test_returns_transform_output(self):
    # get() must return whatever _transform_feature produced, including on
    # the cached second call.
    class Transformer(_FeatureColumn):
      @property
      def name(self):
        return 'Transformer'
      def _transform_feature(self, cache):
        return 'Output'
      @property
      def _parse_example_spec(self):
        pass
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    column = Transformer()
    self.assertEqual('Output', builder.get(column))
    self.assertEqual('Output', builder.get(column))
  def test_does_not_pollute_given_features_dict(self):
    # Cached transform outputs must not be written back into the caller's
    # features dictionary.
    class Transformer(_FeatureColumn):
      @property
      def name(self):
        return 'Transformer'
      def _transform_feature(self, cache):
        return 'Output'
      @property
      def _parse_example_spec(self):
        pass
    features = {'a': [[2], [3.]]}
    builder = _LazyBuilder(features=features)
    builder.get(Transformer())
    self.assertEqual(['a'], list(features.keys()))
  def test_error_if_feature_is_not_found(self):
    # Both str and unicode keys missing from the features dict raise.
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      builder.get('bbb')
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      builder.get(u'bbb')
  def test_not_supported_feature_column(self):
    # A column whose _transform_feature returns None is rejected.
    class NotAProperColumn(_FeatureColumn):
      @property
      def name(self):
        return 'NotAProperColumn'
      def _transform_feature(self, cache):
        # It should return not None.
        pass
      @property
      def _parse_example_spec(self):
        pass
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'NotAProperColumn is not supported'):
      builder.get(NotAProperColumn())
  def test_key_should_be_string_or_feature_colum(self):
    # Keys other than str or _FeatureColumn raise a TypeError.
    class NotAFeatureColumn(object):
      pass
    builder = _LazyBuilder(features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(
        TypeError, '"key" must be either a "str" or "_FeatureColumn".'):
      builder.get(NotAFeatureColumn())
class NumericColumnTest(test.TestCase):
def test_defaults(self):
a = fc.numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual('aaa', a._var_scope_name)
self.assertEqual((1,), a.shape)
self.assertIsNone(a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_key_should_be_string(self):
with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
fc.numeric_column(key=('aaa',))
def test_shape_saved_as_tuple(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual((1, 2), a.shape)
def test_default_value_saved_as_tuple(self):
a = fc.numeric_column('aaa', default_value=4.)
self.assertEqual((4.,), a.default_value)
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
self.assertEqual(((3., 2.),), a.default_value)
def test_shape_and_default_value_compatibility(self):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
fc.numeric_column(
'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
fc.numeric_column(
'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])
def test_default_value_type_check(self):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
fc.numeric_column(
'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
with self.assertRaisesRegexp(TypeError,
'default_value must be compatible with dtype'):
fc.numeric_column('aaa', default_value=['string'])
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
fc.numeric_column(
'aaa', shape=[
1.0,
])
with self.assertRaisesRegexp(ValueError,
'shape dimensions must be greater than 0'):
fc.numeric_column(
'aaa', shape=[
0,
])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(ValueError,
'dtype must be convertible to float'):
fc.numeric_column('aaa', dtype=dtypes.string)
def test_scalar_default_value_fills_the_shape(self):
a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)
def test_parse_spec(self):
a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
self.assertEqual({
'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
}, a._parse_example_spec)
  def test_parse_example_no_default_value(self):
    """parse_example round-trips a float feature through the column's spec."""
    price = fc.numeric_column('price', shape=[2])
    # Serialized tf.Example with a two-element float feature.
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([price]))
    self.assertIn('price', features)
    with self.test_session():
      # Parsed values come back unchanged, batched on the first axis.
      self.assertAllEqual([[20., 110.]], features['price'].eval())
  def test_parse_example_with_default_value(self):
    """A missing feature is filled with the column's default_value."""
    price = fc.numeric_column('price', shape=[2], default_value=11.)
    # First example carries a 'price' feature...
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    # ...the second one does not, so the default should kick in.
    no_data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'something_else':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString(),
                    no_data.SerializeToString()],
        features=fc.make_parse_example_spec([price]))
    self.assertIn('price', features)
    with self.test_session():
      # Row 0: parsed values; row 1: scalar default broadcast to shape [2].
      self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
fc.numeric_column('price', normalizer_fn='NotACallable')
def test_normalizer_fn_transform_feature(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price])
with self.test_session():
self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())
def test_get_dense_tensor(self):
def _increment_two(input_tensor):
return input_tensor + 2.
price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
builder = _LazyBuilder({'price': [[1., 2.], [5., 6.]]})
self.assertEqual(builder.get(price), price._get_dense_tensor(builder))
def test_sparse_tensor_not_supported(self):
price = fc.numeric_column('price')
builder = _LazyBuilder({
'price':
sparse_tensor.SparseTensor(
indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
})
with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
price._transform_feature(builder)
def test_deep_copy(self):
a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
a_copy = copy.deepcopy(a)
self.assertEqual(a_copy.name, 'aaa')
self.assertEqual(a_copy.shape, (1, 2))
self.assertEqual(a_copy.default_value, ((3., 2.),))
def test_numpy_default_value(self):
a = fc.numeric_column(
'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
self.assertEqual(a.default_value, ((3., 2.),))
  def test_linear_model(self):
    """linear_model treats a numeric column as one dense weight per value."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        # Bias and weight are zero-initialized, so predictions start at zero.
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.]], price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price_var.assign([[10.]]))
        # predictions = price * weight: [1, 5] * 10.
        self.assertAllClose([[10.], [50.]], predictions.eval())
  def test_keras_linear_model(self):
    """The Keras _LinearModel path mirrors fc.linear_model for this column."""
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = get_keras_linear_model_predictions(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        # Zero-initialized bias and weight yield zero predictions.
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.]], price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price_var.assign([[10.]]))
        # predictions = price * weight: [1, 5] * 10.
        self.assertAllClose([[10.], [50.]], predictions.eval())
class BucketizedColumnTest(test.TestCase):
  """Tests for fc.bucketized_column: validation, transforms and models."""

  def test_invalid_source_column_type(self):
    """Only numeric_column sources are accepted."""
    a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
    with self.assertRaisesRegexp(
        ValueError,
        'source_column must be a column generated with numeric_column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_source_column_shape(self):
    """Multi-dimensional source columns are rejected."""
    a = fc.numeric_column('aaa', shape=[2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'source_column must be one-dimensional column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_boundaries(self):
    """Boundaries must be a sorted list: None, scalars, unsorted or
    duplicate entries are all rejected."""
    a = fc.numeric_column('aaa')
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=None)
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=1.)
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 0])
    with self.assertRaisesRegexp(
        ValueError, 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 1])

  def test_name(self):
    """The name is derived from the source column's name."""
    a = fc.numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual('aaa_bucketized', b.name)

  def test_var_scope_name(self):
    """_var_scope_name matches the derived name."""
    a = fc.numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual('aaa_bucketized', b._var_scope_name)

  def test_parse_spec(self):
    """The parse spec is inherited from the source numeric column."""
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
    }, b._parse_example_spec)

  def test_variable_shape(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> variable_shape=[2, 3].
    self.assertAllEqual((2, 3), b._variable_shape)

  def test_num_buckets(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> num_buckets=6.
    self.assertEqual(6, b._num_buckets)

  def test_parse_example(self):
    """Parsing uses the source column's spec; values are not yet bucketized."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([bucketized_price]))
    self.assertIn('price', features)
    with self.test_session():
      # Raw float values come back; bucketization happens at transform time.
      self.assertAllEqual([[20., 110.]], features['price'].eval())

  def test_transform_feature(self):
    """Transformation maps each value to its bucket index."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformed_tensor = _transform_features({
          'price': [[-1., 1.], [5., 6.]]
      }, [bucketized_price])
      with _initialized_session():
        # -1 -> bucket 0, 1 -> bucket 1, 5 -> bucket 3, 6 -> bucket 4.
        self.assertAllEqual([[0, 1], [3, 4]],
                            transformed_tensor[bucketized_price].eval())

  def test_get_dense_tensor_one_input_value(self):
    """Tests _get_dense_tensor() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]})
      with _initialized_session():
        bucketized_price_tensor = bucketized_price._get_dense_tensor(builder)
        self.assertAllClose(
            # One-hot tensor.
            [[[1., 0., 0., 0., 0.]],
             [[0., 1., 0., 0., 0.]],
             [[0., 0., 0., 1., 0.]],
             [[0., 0., 0., 0., 1.]]],
            bucketized_price_tensor.eval())

  def test_get_dense_tensor_two_input_values(self):
    """Tests _get_dense_tensor() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]})
      with _initialized_session():
        bucketized_price_tensor = bucketized_price._get_dense_tensor(builder)
        self.assertAllClose(
            # One-hot tensor.
            [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
             [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
            bucketized_price_tensor.eval())

  def test_get_sparse_tensors_one_input_value(self):
    """Tests _get_sparse_tensors() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]})
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price._get_sparse_tensors(builder)
        # Bucketized ids carry no weights.
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual(
            [[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices)
        self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
        self.assertAllEqual([4, 1], id_tensor_value.dense_shape)

  def test_get_sparse_tensors_two_input_values(self):
    """Tests _get_sparse_tensors() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]})
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price._get_sparse_tensors(builder)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual(
            [[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
        # Values 0-4 correspond to the first column of the input price.
        # Values 5-9 correspond to the second column of the input price.
        self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
        self.assertAllEqual([2, 2], id_tensor_value.dense_shape)

  def test_sparse_tensor_input_not_supported(self):
    """Bucketized columns require a dense source tensor."""
    price = fc.numeric_column('price')
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
    builder = _LazyBuilder({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      bucketized_price._transform_feature(builder)

  def test_deep_copy(self):
    """deepcopy preserves name, variable shape and boundaries."""
    a = fc.numeric_column('aaa', shape=[2])
    a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
    a_bucketized_copy = copy.deepcopy(a_bucketized)
    self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
    self.assertAllEqual(a_bucketized_copy._variable_shape, (2, 3))
    self.assertEqual(a_bucketized_copy.boundaries, (0, 1))

  def test_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.]], bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())

  def test_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.],
             [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], predictions.eval())

  def test_keras_linear_model_one_input_value(self):
    """Tests _LinearModel for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                      [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], predictions.eval())
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]], predictions.eval())

  def test_keras_linear_model_two_input_values(self):
    """Tests _LinearModel for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                      [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            bucketized_price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], predictions.eval())
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], predictions.eval())
class HashedCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_hash_bucket."""

  def test_defaults(self):
    """Default dtype is string; name/key/bucket size are as constructed."""
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual('aaa', a.name)
    self.assertEqual('aaa', a._var_scope_name)
    self.assertEqual('aaa', a.key)
    self.assertEqual(10, a.hash_bucket_size)
    self.assertEqual(dtypes.string, a.dtype)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_hash_bucket(('key',), 10)

  def test_bucket_size_should_be_given(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
      fc.categorical_column_with_hash_bucket('aaa', None)

  def test_bucket_size_should_be_positive(self):
    with self.assertRaisesRegexp(ValueError,
                                 'hash_bucket_size must be at least 1'):
      fc.categorical_column_with_hash_bucket('aaa', 0)

  def test_dtype_should_be_string_or_integer(self):
    """Float dtypes are rejected at construction time."""
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)

  def test_deep_copy(self):
    """deepcopy preserves all construction attributes."""
    original = fc.categorical_column_with_hash_bucket('aaa', 10)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(10, column.hash_bucket_size)
      self.assertEqual(10, column._num_buckets)
      self.assertEqual(dtypes.string, column.dtype)

  def test_parse_spec_string(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, a._parse_example_spec)

  def test_parse_spec_int(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, a._parse_example_spec)

  def test_parse_example(self):
    """parse_example yields a sparse string tensor for this column's spec."""
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  def test_strings_should_be_hashed(self):
    """String values are hashed into int64 bucket ids; sparsity is kept."""
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse])
    output = outputs[hashed_sparse]
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [6, 4, 1]
    with self.test_session():
      self.assertEqual(dtypes.int64, output.values.dtype)
      self.assertAllEqual(expected_values, output.values.eval())
      self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
      self.assertAllEqual(wire_tensor.dense_shape.eval(),
                          output.dense_shape.eval())

  def test_tensor_dtype_should_be_string_or_integer(self):
    """Float-valued input tensors are rejected at transform time."""
    string_fc = fc.categorical_column_with_hash_bucket(
        'a_string', 10, dtype=dtypes.string)
    int_fc = fc.categorical_column_with_hash_bucket(
        'a_int', 10, dtype=dtypes.int32)
    # NOTE(review): named float_fc but constructed with string dtype — a
    # float column dtype would already fail at construction; the float path
    # is exercised via the float-valued input tensor below.
    float_fc = fc.categorical_column_with_hash_bucket(
        'a_float', 10, dtype=dtypes.string)
    int_tensor = sparse_tensor.SparseTensor(
        values=[101],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    string_tensor = sparse_tensor.SparseTensor(
        values=['101'],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    float_tensor = sparse_tensor.SparseTensor(
        values=[101.],
        indices=[[0, 0]],
        dense_shape=[1, 1])
    builder = _LazyBuilder({
        'a_int': int_tensor,
        'a_string': string_tensor,
        'a_float': float_tensor
    })
    builder.get(string_fc)
    builder.get(int_fc)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      builder.get(float_fc)

  def test_dtype_should_match_with_tensor(self):
    """The input tensor dtype must match the column's declared dtype."""
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
    builder = _LazyBuilder({'wire': wire_tensor})
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      builder.get(hashed_sparse)

  def test_ints_should_be_hashed(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[101, 201, 301],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.test_session():
      self.assertAllEqual(expected_values, output.values.eval())

  def test_int32_64_is_compatible(self):
    """int32 input tensors are accepted by an int64 column."""
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.test_session():
      self.assertAllEqual(expected_values, output.values.eval())

  def test_get_sparse_tensors(self):
    """_get_sparse_tensors returns hashed ids and no weight tensor."""
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2])
    })
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  def test_get_sparse_tensors_weight_collections(self):
    """Hashing creates no variables, so collections stay empty."""
    column = fc.categorical_column_with_hash_bucket('aaa', 10)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  def test_get_sparse_tensors_dense_input(self):
    """Dense string input is converted to sparse before hashing."""
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({'wire': (('omar', ''), ('stringer', 'marlo'))})
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  def test_linear_model(self):
    """linear_model sums the per-bucket weights of each example's ids."""
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 3: wire_var[3] = 4
        # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
        self.assertAllClose(((4.,), (6.,)), predictions.eval())

  def test_keras_linear_model(self):
    """The Keras _LinearModel path mirrors fc.linear_model for this column."""
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 3: wire_var[3] = 4
        # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
        self.assertAllClose(((4.,), (6.,)), predictions.eval())
class CrossedColumnTest(test.TestCase):
def test_keys_empty(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column([], 10)
def test_keys_length_one(self):
with self.assertRaisesRegexp(
ValueError, 'keys must be a list with length > 1'):
fc.crossed_column(['a'], 10)
def test_key_type_unsupported(self):
with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
fc.crossed_column(['a', fc.numeric_column('c')], 10)
with self.assertRaisesRegexp(
ValueError, 'categorical_column_with_hash_bucket is not supported'):
fc.crossed_column(
['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)
def test_hash_bucket_size_negative(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], -1)
def test_hash_bucket_size_zero(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], 0)
def test_hash_bucket_size_none(self):
with self.assertRaisesRegexp(
ValueError, 'hash_bucket_size must be > 1'):
fc.crossed_column(['a', 'c'], None)
def test_name(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_name_leaf_keys_ordered_alphabetically(self):
"""Tests that the name does not depend on the order of given columns."""
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d2', 'c'], 10)
crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)
def test_var_scope_name(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2._var_scope_name)
def test_parse_spec(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 10)
self.assertEqual({
'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
'c': parsing_ops.VarLenFeature(dtypes.string),
}, crossed._parse_example_spec)
def test_num_buckets(self):
a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed = fc.crossed_column([b, 'c'], 15)
self.assertEqual(15, crossed._num_buckets)
def test_deep_copy(self):
a = fc.numeric_column('a', dtype=dtypes.int32)
b = fc.bucketized_column(a, boundaries=[0, 1])
crossed1 = fc.crossed_column(['d1', 'd2'], 10)
crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
crossed2_copy = copy.deepcopy(crossed2)
self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name,)
self.assertEqual(15, crossed2_copy.hash_bucket_size)
self.assertEqual(5, crossed2_copy.hash_key)
  def test_parse_example(self):
    """Parsing a crossed column pulls in every leaf column's raw feature."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.])),
            'wire':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer'])),
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([price_cross_wire]))
    # Both leaf features appear; the cross itself is computed later.
    self.assertIn('price', features)
    self.assertIn('wire', features)
    with self.test_session():
      self.assertAllEqual([[20., 110.]], features['price'].eval())
      wire_sparse = features['wire']
      self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
      # Use byte constants to pass the open-source test.
      self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
      self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
  def test_transform_feature(self):
    """The crossed transform yields sparse ids within [0, hash_bucket_size)."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    hash_bucket_size = 10
    price_cross_wire = fc.crossed_column(
        [bucketized_price, 'wire'], hash_bucket_size)
    features = {
        'price': constant_op.constant([[1., 2.], [5., 6.]]),
        'wire': sparse_tensor.SparseTensor(
            values=['omar', 'stringer', 'marlo'],
            indices=[[0, 0], [1, 0], [1, 1]],
            dense_shape=[2, 2]),
    }
    outputs = _transform_features(features, [price_cross_wire])
    output = outputs[price_cross_wire]
    with self.test_session() as sess:
      output_val = sess.run(output)
      # Row 0: 2 price values x 1 wire value; row 1: 2 x 2 -> 4 crosses.
      self.assertAllEqual(
          [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
      for val in output_val.values:
        self.assertIn(val, list(range(hash_bucket_size)))
      self.assertAllEqual([2, 4], output_val.dense_shape)
  def test_get_sparse_tensors(self):
    """Crosses bucketized, raw-string and nested crossed inputs.

    The resulting id tensor enumerates every combination per example and
    hashes it into hash_bucket_size buckets.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
          'd1':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d1A', 'd1B', 'd1C'],
                  dense_shape=(2, 2)),
          'd2':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d2A', 'd2B', 'd2C'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed2._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        # Example 0 has 2x1x1 crosses; example 1 has 2x2x(2x2) = 16 crosses.
        self.assertAllEqual(
            ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5),
             (1, 6), (1, 7), (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13),
             (1, 14), (1, 15)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (
            6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)
  def test_get_sparse_tensors_simple(self):
    """Same as test_get_sparse_tensors, but with simpler values."""
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        # Example 0: 2 buckets x 1 'c' value; example 1: 2 x 2 crosses.
        self.assertAllEqual(
            ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (1, 0, 1, 3, 4, 2)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)
  def test_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'a': constant_op.constant(((-1., .5), (.5, 1.))),
          'c': sparse_tensor.SparseTensor(
              indices=((0, 0), (1, 0), (1, 1)),
              values=['cA', 'cB', 'cC'],
              dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), bias.eval())
        # One weight per hash bucket, all initialized to zero.
        self.assertAllClose(
            ((0.,), (0.,), (0.,), (0.,), (0.,)), crossed_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        # Example 0: w[1] + w[0] = 3; example 1: w[1] + w[3] + w[4] + w[2] = 14.
        self.assertAllClose(((3.,), (14.,)), predictions.eval())
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
  def test_linear_model_with_weights(self):
    # crossed_column must reject categorical inputs that carry a
    # weight_tensor; this synthetic column produces one on purpose.
    class _TestColumnWithWeights(_CategoricalColumn):
      """Produces sparse IDs and sparse weights."""
      @property
      def name(self):
        return 'test_column'
      @property
      def _parse_example_spec(self):
        return {
            self.name: parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name): parsing_ops.VarLenFeature(
                dtypes.float32),
            }
      @property
      def _num_buckets(self):
        return 5
      def _transform_feature(self, inputs):
        # Returns a (ids, weights) pair so _get_sparse_tensors can populate
        # both fields of IdWeightPair.
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))
      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return _CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        fc.linear_model({
            t.name: sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[0, 1, 2],
                dense_shape=(2, 2)),
            '{}_weights'.format(t.name): sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[1., 10., 2.],
                dense_shape=(2, 2)),
            'c': sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=['cA', 'cB', 'cC'],
                dense_shape=(2, 2)),
        }, (crossed,))
  def test_keras_linear_model(self):
    """Tests _LinearModel.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        # All variables start zero-initialized.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            crossed_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), predictions.eval())
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), predictions.eval())
  def test_keras_linear_model_with_weights(self):
    # Same scenario as test_linear_model_with_weights, but through the
    # keras-style _LinearModel: a weighted categorical input must be
    # rejected by crossed_column.
    class _TestColumnWithWeights(_CategoricalColumn):
      """Produces sparse IDs and sparse weights."""
      @property
      def name(self):
        return 'test_column'
      @property
      def _parse_example_spec(self):
        return {
            self.name:
                parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name):
                parsing_ops.VarLenFeature(dtypes.float32),
        }
      @property
      def _num_buckets(self):
        return 5
      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))
      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return _CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])
    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        get_keras_linear_model_predictions({
            t.name:
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[0, 1, 2],
                    dense_shape=(2, 2)),
            '{}_weights'.format(t.name):
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[1., 10., 2.],
                    dense_shape=(2, 2)),
            'c':
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=['cA', 'cB', 'cC'],
                    dense_shape=(2, 2)),
        }, (crossed,))
def get_linear_model_bias(name='linear_model'):
  """Returns the bias variable a linear model created under scope `name`."""
  reuse_scope = variable_scope.variable_scope(name, reuse=True)
  with reuse_scope:
    return variable_scope.get_variable('bias_weights')
def get_linear_model_column_var(column, name='linear_model'):
  """Returns the weight variable a linear model created for `column`."""
  scope_name = '/'.join((name, column.name))
  matches = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope_name)
  return matches[0]
def get_keras_linear_model_predictions(features,
                                       feature_columns,
                                       units=1,
                                       sparse_combiner='sum',
                                       weight_collections=None,
                                       trainable=True,
                                       cols_to_vars=None):
  """Builds a `_LinearModel` and applies it to `features`.

  Mirrors `fc.linear_model` for tests of the keras-style `_LinearModel`.
  If `cols_to_vars` is provided, it is filled in with the column-to-variables
  mapping created by the model.
  """
  model = _LinearModel(
      feature_columns,
      units,
      sparse_combiner,
      weight_collections,
      trainable,
      name='linear_model')
  predictions = model(features)  # pylint: disable=not-callable
  if cols_to_vars is not None:
    cols_to_vars.update(model.cols_to_vars())
  return predictions
class LinearModelTest(test.TestCase):
  """Tests `fc.linear_model`: validation, dense/sparse columns, combiners,
  variable collections, partitioned variables and variable scoping."""
  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc.linear_model(features={}, feature_columns=[])
  def test_should_be_feature_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
      fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')
  def test_should_be_dense_or_categorical_column(self):
    # A _FeatureColumn that is neither dense nor categorical cannot be
    # consumed by a linear model and must be rejected.
    class NotSupportedColumn(_FeatureColumn):
      @property
      def name(self):
        return 'NotSupportedColumn'
      def _transform_feature(self, cache):
        pass
      @property
      def _parse_example_spec(self):
        pass
    with self.assertRaisesRegexp(
        ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
      fc.linear_model(
          features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.linear_model(
          features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc.linear_model(
          features={'a': [[0]]},
          feature_columns=[fc.numeric_column('a'),
                           fc.numeric_column('a')])
  def test_dense_bias(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        sess.run(price_var.assign([[10.]]))
        sess.run(bias.assign([5.]))
        # predictions = price * 10. + 5.
        self.assertAllClose([[15.], [55.]], predictions.eval())
  def test_sparse_bias(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 0: id 2 -> 1000 + 5; row 1: ids 0, 3 -> 10 + 10000 + 5.
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
  def test_dense_and_sparse_bias(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        # Contributions from both columns plus bias are summed per row.
        self.assertAllClose([[1015.], [10065.]], predictions.eval())
  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""
    class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):
      @property
      def name(self):
        return 'dense_and_sparse_column'
      @property
      def _parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}
      def _transform_feature(self, inputs):
        return inputs.get(self.name)
      @property
      def _variable_shape(self):
        # The dense path must never be exercised for this column.
        raise ValueError('Should not use this method.')
      def _get_dense_tensor(self, inputs, weight_collections=None,
                            trainable=None):
        raise ValueError('Should not use this method.')
      @property
      def _num_buckets(self):
        return 4
      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return _CategoricalColumn.IdWeightPair(sp_tensor, None)
    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      predictions = fc.linear_model(features, [dense_and_sparse_column])
      bias = get_linear_model_bias()
      dense_and_sparse_column_var = get_linear_model_column_var(
          dense_and_sparse_column)
      with _initialized_session() as sess:
        sess.run(dense_and_sparse_column_var.assign(
            [[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
  def test_dense_multi_output(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        # units=3 gives a (3,) bias and a (1, 3) weight matrix.
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((1, 3)), price_var.eval())
        sess.run(price_var.assign([[10., 100., 1000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
                            predictions.eval())
  def test_sparse_multi_output(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast], units=3)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [
                1000., 1100., 1200.
            ], [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            predictions.eval())
  def test_dense_multi_dimension(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price])
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([[0.], [0.]], price_var.eval())
        sess.run(price_var.assign([[10.], [100.]]))
        # Row dot products: 1*10 + 2*100 and 5*10 + 6*100.
        self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_sparse_multi_rank(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        # Row 0: ids (2, 0) -> 1000 + 10; row 1: ids (3, 2) -> 10000 + 1000.
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))
  def test_sparse_combiner(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(
          features, [wire_cast], sparse_combiner='mean')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # 'mean' averages per row: row 1 is (10 + 10000) / 2 + 5.
        self.assertAllClose([[1005.], [5010.]], predictions.eval())
  def test_sparse_combiner_with_negative_weights(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {
          'wire_cast': wire_tensor,
          'weights': constant_op.constant([[1., 1., -1.0]])
      }
      predictions = fc.linear_model(
          features, [wire_cast_weights], sparse_combiner='sum')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 1: 1 * 10 + (-1) * 10000 + 5 = -9985.
        self.assertAllClose([[1005.], [-9985.]], predictions.eval())
  def test_dense_multi_dimension_multi_output(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((2, 3)), price_var.eval())
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            predictions.eval())
  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc.linear_model(features, [price])
  def test_dense_reshaping(self):
    # A (1, 2)-shaped column is flattened to 2 weights per example.
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.]], price_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_dense_multi_column(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3.], [4.]]
      }
      predictions = fc.linear_model(features, [price1, price2])
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.]], price1_var.eval())
        self.assertAllClose([[0.]], price2_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        self.assertAllClose([[3217.], [4657.]], predictions.eval())
  def test_fills_cols_to_vars(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      cols_to_vars = {}
      fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      self.assertAllEqual(cols_to_vars['bias'], [bias])
      self.assertAllEqual(cols_to_vars[price1], [price1_var])
      self.assertAllEqual(cols_to_vars[price2], [price2_var])
  def test_fills_cols_to_vars_partitioned_variables(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2', shape=3)
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [6., 7.]],
          'price2': [[3., 4., 5.], [8., 9., 10.]]
      }
      cols_to_vars = {}
      with variable_scope.variable_scope(
          'linear',
          partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
        fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
      with _initialized_session():
        self.assertEqual([0.], cols_to_vars['bias'][0].eval())
        # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
        self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
        # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
        # a [1, 1] Variable.
        self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
  def test_dense_collection(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      self.assertIn(bias, my_vars)
      self.assertIn(price_var, my_vars)
  def test_sparse_collection(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(
          features, [wire_cast], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, my_vars)
      self.assertIn(wire_cast_var, my_vars)
  def test_dense_trainable_default(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertIn(bias, trainable_vars)
      self.assertIn(price_var, trainable_vars)
  def test_sparse_trainable_default(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast])
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, trainable_vars)
      self.assertIn(wire_cast_var, trainable_vars)
  def test_dense_trainable_false(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)
  def test_sparse_trainable_false(self):
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)
  def test_column_order(self):
    # Variables are created in column-name order regardless of the order in
    # which the columns are passed to linear_model.
    price_a = fc.numeric_column('price_a')
    price_b = fc.numeric_column('price_b')
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [price_a, wire_cast, price_b],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [wire_cast, price_b, price_a],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
  def test_static_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.linear_model(features, [price1, price2])
  def test_subset_of_static_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    price3 = fc.numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      # Even though price1's batch size is unknown, the static mismatch
      # between price2 and price3 is detected at graph-construction time.
      with self.assertRaisesRegexp(
          ValueError,
          'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.linear_model(features, [price1, price2, price3])
  def test_runtime_batch_size_mismatch(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        # The mismatch is only detectable once the placeholder is fed.
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
  def test_runtime_batch_size_matches(self):
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })
  def test_with_numpy_input_fn(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([-1., 2., 13., 104.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.linear_model(features, [price_buckets, body_style])
    # self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # numpy_input_fn feeds through queues, so queue runners are needed.
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
      coord.request_stop()
      coord.join(threads)
  def test_with_1d_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([-1., 12.,]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    net = fc.linear_model(features, [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
  def test_with_1d_unknown_shape_sparse_tensor(self):
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(price, boundaries=[0., 10., 100.,])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)),
        values=('sedan', 'hardtop'),
        dense_shape=(2,))
    country_data = np.array(['US', 'CA'])
    net = fc.linear_model(features, [price_buckets, body_style, country])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      # country's weights stay zero, so only price and body-style contribute.
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          sess.run(
                              net,
                              feed_dict={
                                  features['price']: price_data,
                                  features['body-style']: body_style_data,
                                  features['country']: country_data
                              }))
  def test_with_rank_0_feature(self):
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)
    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.linear_model(features, [price])
    # Dynamic rank 0 should fail at session-run time
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.linear_model(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
  def test_multiple_linear_models(self):
    # Each call to linear_model creates a fresh, uniquified variable scope
    # ('linear_model', 'linear_model_1'), so the two models are independent.
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features1 = {'price': [[1.], [5.]]}
      features2 = {'price': [[2.], [10.]]}
      predictions1 = fc.linear_model(features1, [price])
      predictions2 = fc.linear_model(features2, [price])
      bias1 = get_linear_model_bias(name='linear_model')
      bias2 = get_linear_model_bias(name='linear_model_1')
      price_var1 = get_linear_model_column_var(price, name='linear_model')
      price_var2 = get_linear_model_column_var(price, name='linear_model_1')
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias1.eval())
        sess.run(price_var1.assign([[10.]]))
        sess.run(bias1.assign([5.]))
        self.assertAllClose([[15.], [55.]], predictions1.eval())
        self.assertAllClose([0.], bias2.eval())
        sess.run(price_var2.assign([[10.]]))
        sess.run(bias2.assign([5.]))
        self.assertAllClose([[25.], [105.]], predictions2.eval())
class _LinearModelTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
get_keras_linear_model_predictions(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
get_keras_linear_model_predictions(
features={'a': [[0]]}, feature_columns='NotSupported')
  def test_should_be_dense_or_categorical_column(self):
    # A _FeatureColumn that is neither dense nor categorical cannot be used
    # by the linear model and must be rejected.
    class NotSupportedColumn(_FeatureColumn):
      @property
      def name(self):
        return 'NotSupportedColumn'
      def _transform_feature(self, cache):
        pass
      @property
      def _parse_example_spec(self):
        pass
    with self.assertRaisesRegexp(
        ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
      get_keras_linear_model_predictions(
          features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.linear_model(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
get_keras_linear_model_predictions(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_dense_bias(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], predictions.eval())
  def test_sparse_bias(self):
    # A single sparse column: each row's prediction is the sum of the
    # weights of its hashed ids, plus the bias.
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.], [0.], [0.]], wire_cast_var.eval())
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 0: id 2 -> 1000 + 5; row 1: ids 0, 3 -> 10 + 10000 + 5.
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
  def test_dense_and_sparse_bias(self):
    # Dense and sparse column contributions plus bias are summed per row.
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        # Row 0: 1000 + 1*10 + 5; row 1: (10 + 10000) + 5*10 + 5.
        self.assertAllClose([[1015.], [10065.]], predictions.eval())
  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""
    # Test double implementing both _DenseColumn and _CategoricalColumn; its
    # dense accessors raise so the test fails loudly if the linear model
    # takes the dense path instead of the sparse one.
    class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):
      @property
      def name(self):
        return 'dense_and_sparse_column'
      @property
      def _parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}
      def _transform_feature(self, inputs):
        return inputs.get(self.name)
      @property
      def _variable_shape(self):
        # Dense path must not be taken.
        raise ValueError('Should not use this method.')
      def _get_dense_tensor(self,
                            inputs,
                            weight_collections=None,
                            trainable=None):
        # Dense path must not be taken.
        raise ValueError('Should not use this method.')
      @property
      def _num_buckets(self):
        return 4
      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        # Fixed ids [2, 0, 3], mirroring the hashed wire_cast tests above.
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return _CategoricalColumn.IdWeightPair(sp_tensor, None)
    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [dense_and_sparse_column])
      bias = get_linear_model_bias()
      dense_and_sparse_column_var = get_linear_model_column_var(
          dense_and_sparse_column)
      with _initialized_session() as sess:
        sess.run(
            dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
                                                [10000.]]))
        sess.run(bias.assign([5.]))
        # Same expectation as the pure-sparse test: w[2]+bias, w[0]+w[3]+bias.
        self.assertAllClose([[1005.], [10015.]], predictions.eval())
def test_dense_multi_output(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(
features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), bias.eval())
self.assertAllClose(np.zeros((1, 3)), price_var.eval())
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
predictions.eval())
  def test_sparse_multi_output(self):
    """units=3: each sparse id selects a 3-wide weight row, summed per row."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [wire_cast], units=3)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((4, 3)), wire_cast_var.eval())
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
                                  [1000., 1100.,
                                   1200.], [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        # Row 0: w[2] + bias; row 1: w[0] + w[3] + bias, per output unit.
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            predictions.eval())
def test_dense_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], price_var.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_sparse_multi_rank(self):
    """Rank-3 sparse input: all ids in an example sum into its prediction."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(features, [wire_cast])
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), wire_cast_var.eval())
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        # Row 0: w[2] + w[0] = 1010; row 1: w[3] + w[2] = 11000 (bias is 0).
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))
  def test_sparse_combiner(self):
    """sparse_combiner='mean' averages weights over the ids in an example."""
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [wire_cast], sparse_combiner='mean')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        # Row 0: w[2] + bias; row 1: (w[0] + w[3]) / 2 + bias = 5005 + 5.
        self.assertAllClose([[1005.], [5010.]], predictions.eval())
  def test_dense_multi_dimension_multi_output(self):
    """shape=2 input with units=3: a full 2x3 weight matrix is applied."""
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = get_keras_linear_model_predictions(
          features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), bias.eval())
        self.assertAllClose(np.zeros((2, 3)), price_var.eval())
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        # predictions = features . weights + bias (e.g. 1*1 + 2*10 + 2 = 23).
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            predictions.eval())
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
get_keras_linear_model_predictions(features, [price])
def test_dense_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], bias.eval())
self.assertAllClose([[0.], [0.]], price_var.eval())
self.assertAllClose([[0.], [0.]], predictions.eval())
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], predictions.eval())
  def test_dense_multi_column(self):
    """Two dense columns each get their own weight variable; outputs sum."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [price1, price2])
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      with _initialized_session() as sess:
        self.assertAllClose([0.], bias.eval())
        self.assertAllClose([[0.], [0.]], price1_var.eval())
        self.assertAllClose([[0.]], price2_var.eval())
        self.assertAllClose([[0.], [0.]], predictions.eval())
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        # Row 0: 1*10 + 2*100 + 3*1000 + 7 = 3217; row 1 likewise.
        self.assertAllClose([[3217.], [4657.]], predictions.eval())
  def test_fills_cols_to_vars(self):
    """cols_to_vars maps 'bias' and each column to its model variables."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      cols_to_vars = {}
      get_keras_linear_model_predictions(
          features, [price1, price2], cols_to_vars=cols_to_vars)
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      self.assertAllEqual(cols_to_vars['bias'], [bias])
      self.assertAllEqual(cols_to_vars[price1], [price1_var])
      self.assertAllEqual(cols_to_vars[price2], [price2_var])
  def test_fills_cols_to_vars_partitioned_variables(self):
    """Under a partitioner, cols_to_vars holds every shard of each variable."""
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2', shape=3)
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [6., 7.]],
          'price2': [[3., 4., 5.], [8., 9., 10.]]
      }
      cols_to_vars = {}
      with variable_scope.variable_scope(
          'linear',
          partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
        get_keras_linear_model_predictions(
            features, [price1, price2], cols_to_vars=cols_to_vars)
      with _initialized_session():
        self.assertEqual([0.], cols_to_vars['bias'][0].eval())
        # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
        self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
        # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
        # a [1, 1] Variable.
        self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
def test_dense_collection(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(
features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(
features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc.numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
  def test_column_order(self):
    """Variable creation order is independent of the column argument order."""
    price_a = fc.numeric_column('price_a')
    price_b = fc.numeric_column('price_b')
    wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      get_keras_linear_model_predictions(
          features, [price_a, wire_cast, price_b],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      # Variables appear name-sorted regardless of the list order above.
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
    # A different argument order must yield the same variable order.
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      get_keras_linear_model_predictions(
          features, [wire_cast, price_b, price_a],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2, price3])
  def test_runtime_batch_size_mismatch(self):
    """A batch-size mismatch only visible at run time fails in sess.run."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = get_keras_linear_model_predictions(features,
                                                       [price1, price2])
      with _initialized_session() as sess:
        # Feeding batch 3 against a static batch-2 feature raises an OpError.
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
  def test_with_numpy_input_fn(self):
    """End-to-end with numpy_input_fn: bucketized + vocab columns, batch 2."""
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([-1., 2., 13., 104.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = get_keras_linear_model_predictions(features,
                                             [price_buckets, body_style])
    with _initialized_session() as sess:
      # numpy_input_fn feeds via queues; a coordinator manages the runners.
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      # First batch of 2: price -1 -> bucket 0, price 2 -> bucket 1;
      # body styles 'sedan' (id 2) and 'hardtop' (id 0).
      self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], sess.run(net))
      coord.request_stop()
      coord.join(threads)
  def test_with_1d_sparse_tensor(self):
    """1-D dense and 1-D sparse inputs are accepted and batched correctly."""
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                -1.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    net = get_keras_linear_model_predictions(features,
                                             [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      # Example 0: bucket 0 + 'sedan'; example 1: bucket 2 + 'hardtop'.
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run(net))
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """Placeholders with unknown shape still work when fed at run time."""
    price = fc.numeric_column('price')
    price_buckets = fc.bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array(['US', 'CA'])
    net = get_keras_linear_model_predictions(
        features, [price_buckets, body_style, country])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))
      # country weights are never assigned (stay zero), so only the price
      # buckets and body style contribute to the expected values.
      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          sess.run(
                              net,
                              feed_dict={
                                  features['price']: price_data,
                                  features['body-style']: body_style_data,
                                  features['country']: country_data
                              }))
  def test_with_rank_0_feature(self):
    """Rank-0 (scalar) features are rejected, both statically and at run time."""
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)
    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      get_keras_linear_model_predictions(features, [price])
    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = get_keras_linear_model_predictions(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      # The unknown-rank placeholder builds, but feeding a scalar fails.
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
class InputLayerTest(test.TestCase):
  """Tests for the InputLayer class (callable, variable-owning API)."""
  @test_util.run_in_graph_and_eager_modes
  def test_retrieving_input(self):
    """A numeric column's raw value passes straight through the layer."""
    features = {'a': [0.]}
    input_layer = InputLayer(fc.numeric_column('a'))
    inputs = self.evaluate(input_layer(features))
    self.assertAllClose([[0.]], inputs)
  def test_reuses_variables(self):
    """Calling the layer twice reuses the embedding variable (eager mode)."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))
      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(key='a',
                                                               num_buckets=3)
      embedding_dimension = 2
      def _embedding_column_initializer(shape, dtype, partition_info):
        # Deterministic initializer so the expected lookups are exact.
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values
      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)
      input_layer = InputLayer([embedding_column])
      features = {'a': sparse_input}
      inputs = input_layer(features)
      variables = input_layer.variables
      # Sanity check: test that the inputs are correct.
      self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
      # Check that only one variable was created.
      self.assertEqual(1, len(variables))
      # Check that invoking input_layer on the same features does not create
      # additional variables
      _ = input_layer(features)
      self.assertEqual(1, len(variables))
      self.assertEqual(variables[0], input_layer.variables[0])
  def test_feature_column_input_layer_gradient(self):
    """Gradients flow through the layer's embedding lookup (eager mode)."""
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))
      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(key='a',
                                                               num_buckets=3)
      embedding_dimension = 2
      def _embedding_column_initializer(shape, dtype, partition_info):
        # Deterministic initializer so the expected lookups are exact.
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values
      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)
      input_layer = InputLayer([embedding_column])
      features = {'a': sparse_input}
      def scale_matrix():
        matrix = input_layer(features)
        return 2 * matrix
      # Sanity check: Verify that scale_matrix returns the correct output.
      self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
      # Check that the returned gradient is correct.
      grad_function = backprop.implicit_grad(scale_matrix)
      grads_and_vars = grad_function()
      indexed_slice = grads_and_vars[0][0]
      gradient = grads_and_vars[0][0].values
      # Each looked-up embedding row receives gradient 2 per element.
      self.assertAllEqual([0, 1, 2], indexed_slice.indices)
      self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
class FunctionalInputLayerTest(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegexp(ValueError,
'feature_columns must not be empty'):
fc.input_layer(features={}, feature_columns=[])
def test_should_be_dense_column(self):
with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegexp(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.input_layer(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_bare_column(self):
with ops.Graph().as_default():
features = features = {'a': [0.]}
net = fc.input_layer(features, fc.numeric_column('a'))
with _initialized_session():
self.assertAllClose([[0.]], net.eval())
def test_column_generator(self):
with ops.Graph().as_default():
features = features = {'a': [0.], 'b': [1.]}
columns = (fc.numeric_column(key) for key in features)
net = fc.input_layer(features, columns)
with _initialized_session():
self.assertAllClose([[0., 1.]], net.eval())
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegexp(
ValueError, 'Duplicate feature column name found for columns'):
fc.input_layer(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1.], [5.]], net.eval())
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegexp(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc.input_layer(features, [price])
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc.input_layer(features, [price])
with _initialized_session():
self.assertAllClose([[1., 2.], [5., 6.]], net.eval())
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [5., 6.]],
'price2': [[3.], [4.]]
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session():
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], net.eval())
  def test_fills_cols_to_vars(self):
    """Only the embedding column creates a variable; other columns map to []."""
    # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
    # creates a Variable.
    price1 = fc.numeric_column('price1')
    dense_feature = fc.numeric_column('dense_feature')
    dense_feature_bucketized = fc.bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertIsInstance(cols_to_vars[some_embedding_column][0],
                            variables_lib.Variable)
      # Embedding table shape: [hash_bucket_size, dimension].
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
  def test_fills_cols_to_vars_partitioned_variables(self):
    """With a fixed-size partitioner, the embedding maps to all its shards."""
    price1 = fc.numeric_column('price1')
    dense_feature = fc.numeric_column('dense_feature')
    dense_feature_bucketized = fc.bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      with variable_scope.variable_scope(
          'input_from_feature_columns',
          partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
        fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      # The [5, 10] embedding is sharded into rows of 2, 2 and 1.
      self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10])
      self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10])
      self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10])
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc.input_layer(features, [price_a, price_b])
net2 = fc.input_layer(features, [price_b, price_a])
with _initialized_session():
self.assertAllClose([[1., 3.]], net1.eval())
self.assertAllClose([[1., 3.]], net2.eval())
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
fc.input_layer(features, [animal])
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegexp(
ValueError,
'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2, price3])
  def test_runtime_batch_size_mismatch(self):
    """A batch-size mismatch only visible at run time fails in sess.run."""
    price1 = fc.numeric_column('price1')
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = fc.input_layer(features, [price1, price2])
      with _initialized_session() as sess:
        # Feeding batch 3 against a static batch-2 feature raises an OpError.
        with self.assertRaisesRegexp(errors.OpError,
                                     'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc.input_layer(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
  def test_multiple_layers_with_same_embedding_column(self):
    """Each input_layer call creates its own copy of a plain embedding."""
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      fc.input_layer(features, all_cols)
      fc.input_layer(features, all_cols)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(2, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      # The second call lands in a uniquified 'input_layer_1' scope.
      expected_var_names = [
          'input_layer/sparse_feature_embedding/embedding_weights:0',
          'input_layer_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
  def test_multiple_layers_with_same_shared_embedding_column(self):
    """Shared embedding columns reuse one variable across layer calls."""
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc.shared_embedding_columns(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      fc.input_layer(features, all_cols)
      fc.input_layer(features, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    """A shared embedding column used in two different graphs creates exactly
    one embedding variable per graph (sharing does not leak across graphs)."""
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc.shared_embedding_columns(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    all_cols = [embedding_column_a, embedding_column_b]
    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc.input_layer(features, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
    # Second, independent graph: the column is reused but must create its own
    # fresh variable here.
    with ops.Graph().as_default():
      features1 = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc.input_layer(features1, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
  def test_with_numpy_input_fn(self):
    """input_layer fed by numpy_input_fn: checks concatenated output width
    (1 + 3 + 5) and the first batch's exact values."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding table so expected outputs can be hard-coded.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    # one_hot_body_style has 3 dims in input_layer.
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_body_style has 5 dims in input_layer.
    embedded_body_style = fc.embedding_column(body_style, dimension=5,
                                              initializer=_initializer)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([11., 12., 13., 14.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_body_style])
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # numpy_input_fn uses queues, so queue runners must be started.
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)
      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual(
          [[11., 12., 13., 14., 15., 0., 0., 1., 11.],
           [1., 2., 3., 4., 5., 1., 0., 0., 12]],
          sess.run(net))
      coord.request_stop()
      coord.join(threads)
  def test_with_1d_sparse_tensor(self):
    """input_layer accepts rank-1 dense and sparse feature tensors and
    expands them to the expected rank-2 output."""
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding table so expected outputs can be hard-coded.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 5 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(country, dimension=5,
                                           initializer=_initializer)
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([11., 12.,]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country': constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)
    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order (columns sorted by name).
      self.assertAllEqual(
          [[0., 0., 1., 11., 12., 13., 14., 15., 11.],
           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
          sess.run(net))
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """input_layer works with placeholders of fully-unknown shape that are
    fed rank-1 data at session time."""
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Deterministic embedding table so expected outputs can be hard-coded.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)
    # embedded_country has 2 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=2, initializer=_initializer)
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    # All placeholders have statically-unknown rank.
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)
    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)),
        values=('sedan', 'hardtop'),
        dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])
    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order (columns sorted by name).
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))
  def test_with_rank_0_feature(self):
    """Rank-0 (scalar) features are rejected, both at graph-construction time
    (static rank) and at run time (dynamic rank)."""
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)
    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.input_layer(features, [price])
    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.input_layer(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
class MakeParseExampleSpecTest(test.TestCase):
  """Tests for fc.make_parse_example_spec."""
  class _TestFeatureColumn(_FeatureColumn,
                           collections.namedtuple('_TestFeatureColumn',
                                                  ['parse_spec'])):
    # Minimal _FeatureColumn whose parse spec is supplied at construction.
    @property
    def _parse_example_spec(self):
      return self.parse_spec
  def test_no_feature_columns(self):
    """Empty input yields an empty spec."""
    actual = fc.make_parse_example_spec([])
    self.assertDictEqual({}, actual)
  def test_invalid_type(self):
    """Non-_FeatureColumn entries are rejected with ValueError."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    with self.assertRaisesRegexp(
        ValueError,
        'All feature_columns must be _FeatureColumn instances.*invalid_column'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
  def test_one_feature_column(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),))
    self.assertDictEqual({key1: parse_spec1}, actual)
  def test_two_feature_columns(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2})))
    self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
  def test_equal_keys_different_parse_spec(self):
    """Same key with conflicting specs is rejected with ValueError."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    with self.assertRaisesRegexp(
        ValueError,
        'feature_columns contain different parse_spec for key key1'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}),
           self._TestFeatureColumn({key1: parse_spec2})))
  def test_equal_keys_equal_parse_spec(self):
    """Same key with identical specs is de-duplicated, not an error."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key1: parse_spec1})))
    self.assertDictEqual({key1: parse_spec1}, actual)
  def test_multiple_features_dict(self):
    """parse_spec for one column is a dict with length > 1."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    key3 = 'key3'
    parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
    self.assertDictEqual(
        {key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
class VocabularyFileCategoricalColumnTest(test.TestCase):
  """Tests for fc.categorical_column_with_vocabulary_file."""
  def setUp(self):
    super(VocabularyFileCategoricalColumnTest, self).setUp()
    # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
    self._warriors_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/warriors_vocabulary.txt')
    self._warriors_vocabulary_size = 5
    # Contains strings, character names from 'The Wire': omar, stringer, marlo
    self._wire_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/wire_vocabulary.txt')
    self._wire_vocabulary_size = 3
  def test_defaults(self):
    """Default dtype is string; _num_buckets equals vocabulary_size."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column._parse_example_spec)
  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_vocabulary_file(
          key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
  def test_all_constructor_args(self):
    """_num_buckets is vocabulary_size + num_oov_buckets (3 + 4 = 7)."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    self.assertEqual(7, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column._parse_example_spec)
  def test_deep_copy(self):
    """copy.deepcopy preserves name, bucket count, and parse spec."""
    original = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(7, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int32)
      }, column._parse_example_spec)
  def test_vocabulary_file_none(self):
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=None, vocabulary_size=3)
  def test_vocabulary_file_empty_string(self):
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='', vocabulary_size=3)
  def test_invalid_vocabulary_file(self):
    """A nonexistent vocabulary file fails at table initialization time,
    not at column-construction or graph-build time."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
      with self.test_session():
        lookup_ops.tables_initializer().run()
  def test_invalid_vocabulary_size(self):
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=-1)
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=0)
  def test_too_large_vocabulary_size(self):
    """vocabulary_size larger than the file's entry count fails at table
    initialization time."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size + 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
      with self.test_session():
        lookup_ops.tables_initializer().run()
  def test_invalid_num_oov_buckets(self):
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          num_oov_buckets=-1)
  def test_invalid_dtype(self):
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          dtype=dtypes.float64)
  def test_invalid_buckets_and_default_value(self):
    # num_oov_buckets and default_value are mutually exclusive.
    with self.assertRaisesRegexp(
        ValueError, 'both num_oov_buckets and default_value'):
      fc.categorical_column_with_vocabulary_file(
          key='aaa',
          vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=self._wire_vocabulary_size,
          num_oov_buckets=100,
          default_value=2)
  def test_invalid_input_dtype_int32(self):
    """Int input against a string-dtype column is rejected."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        dtype=dtypes.string)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_invalid_input_dtype_string(self):
    """String input against an int32-dtype column is rejected."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_parse_example(self):
    """The column's parse spec round-trips a serialized tf.Example."""
    a = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_get_sparse_tensors(self):
    """In-vocabulary values map to their file index; OOV maps to -1."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_none_vocabulary_size(self):
    """Omitting vocabulary_size infers it from the file."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(self,
                                  sparse_tensor.SparseTensorValue(
                                      indices=inputs.indices,
                                      values=np.array(
                                          (2, -1, 0), dtype=np.int64),
                                      dense_shape=inputs.dense_shape),
                                  id_weight_pair.id_tensor.eval())
  def test_transform_feature(self):
    """_transform_features yields the same id tensor as _get_sparse_tensors."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(self,
                                  sparse_tensor.SparseTensorValue(
                                      indices=inputs.indices,
                                      values=np.array(
                                          (2, -1, 0), dtype=np.int64),
                                      dense_shape=inputs.dense_shape),
                                  id_tensor.eval())
  def test_get_sparse_tensors_weight_collections(self):
    """The lookup creates no variables, so collections stay empty."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))
  def test_get_sparse_tensors_dense_input(self):
    """A dense input is converted to sparse; '' is treated as missing."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    """OOV values map to default_value (here 2) instead of -1."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_oov_buckets(self):
    """OOV values hash into [vocabulary_size, vocabulary_size + 100)."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to 1 less than number of entries in file, we take
    # 'marlo' out of the vocabulary.
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size - 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((-1, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32(self):
    """Int32 vocabulary lookup: in-vocab ids map to indices, OOV to -1."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_dense_input(self):
    """Dense int32 input: -1 entries are treated as missing; OOV values map
    to default_value."""
    default_value = -100
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1), (2, 2)),
              values=np.array((2, default_value, 0, 4), dtype=np.int64),
              dense_shape=(3, 3)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    """Int32 OOV values hash into the extra bucket range."""
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_linear_model(self):
    """linear_model treats the column as multi-hot: each row's prediction is
    the sum of the weights at its id indices."""
    wire_column = fc.categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
  def test_keras_linear_model(self):
    """Same contract as test_linear_model, via the Keras-style helper."""
    wire_column = fc.categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
class VocabularyListCategoricalColumnTest(test.TestCase):
def test_defaults_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual('aaa', column._var_scope_name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column._parse_example_spec)
  def test_key_should_be_string(self):
    """A non-string key (here a tuple) is rejected with ValueError."""
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_vocabulary_list(
          key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
def test_defaults_int(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual('aaa', column._var_scope_name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column._parse_example_spec)
  def test_all_constructor_args(self):
    """Explicit dtype and default_value are accepted; bucket count stays 3."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32,
        default_value=-99)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column._parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column._parse_example_spec)
  def test_invalid_dtype(self):
    """A float dtype argument is rejected."""
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.float32)
  def test_invalid_mapping_dtype(self):
    """A float-valued vocabulary list is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'vocabulary dtype must be string or integer'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12., 24., 36.))
  def test_mismatched_int_dtype(self):
    """int32 dtype with a string vocabulary is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.int32)
  def test_mismatched_string_dtype(self):
    """string dtype with an int vocabulary is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
  def test_none_mapping(self):
    """vocabulary_list=None is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'vocabulary_list.*must be non-empty'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=None)
  def test_empty_mapping(self):
    """An empty vocabulary_list is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r'vocabulary_list.*must be non-empty'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=tuple([]))
  def test_duplicate_mapping(self):
    """Duplicate entries in the vocabulary list are rejected."""
    with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 12))
  def test_invalid_num_oov_buckets(self):
    """A negative num_oov_buckets is rejected."""
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36),
          num_oov_buckets=-1)
  def test_invalid_buckets_and_default_value(self):
    """num_oov_buckets and default_value are mutually exclusive."""
    with self.assertRaisesRegexp(
        ValueError, 'both num_oov_buckets and default_value'):
      fc.categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=(12, 24, 36),
          num_oov_buckets=100,
          default_value=2)
  def test_invalid_input_dtype_int32(self):
    """Int input against a string-vocabulary column is rejected at lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_invalid_input_dtype_string(self):
    """String input against an int-vocabulary column is rejected at lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=(12, 24, 36))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
  def test_parse_example_string(self):
    """The column's parse spec round-trips a bytes-valued tf.Example."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_parse_example_int(self):
    """The column's parse spec round-trips an int64-valued tf.Example."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(11, 21, 31))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=[11, 21],
              dense_shape=[1, 2]),
          features['aaa'].eval())
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_weight_pair.id_tensor.eval())
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = _transform_features({'aaa': inputs}, [column])[column]
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
id_tensor.eval())
def test_get_sparse_tensors_weight_collections(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
column._get_sparse_tensors(
_LazyBuilder({
'aaa': inputs
}), weight_collections=('my_weights',))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
self.assertItemsEqual([], ops.get_collection('my_weights'))
  def test_get_sparse_tensors_dense_input(self):
    """Dense string input is converted to sparse ids before the lookup."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'))
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # The '' entry at (0, 1) is absent from the expected indices, i.e. it is
      # treated as missing during densification; 'skywalker' is OOV -> -1.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    """Out-of-vocabulary values map to default_value instead of -1."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # 'marlo' -> 2, 'skywalker' (OOV) -> default_value 2, 'omar' -> 0.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_with_oov_buckets(self):
    """OOV values hash into the extra buckets beyond the vocabulary ids."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # In-vocab: 'marlo' -> 2, 'omar' -> 0.  The OOV ids 33 and 62 land in
      # [3, 103); the exact values depend on the deterministic hash used by
      # the lookup implementation.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32(self):
    """int32 inputs against an int32 vocabulary produce int64 ids."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=np.array((11, 100, 30, 22), dtype=np.int32),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # 11 -> 2, 100 is out-of-vocabulary -> -1, 30 -> 0, 22 -> 4.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column._get_sparse_tensors(
_LazyBuilder({
'aaa':
np.array(
((11, -1, -1), (100, 30, -1), (-1, -1, 22)), dtype=np.int32)
}))
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)),
id_weight_pair.id_tensor.eval())
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    """OOV int values hash into the extra buckets beyond the vocabulary."""
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # In-vocab: 11 -> 2, 30 -> 0, 22 -> 4.  The OOV value 100 hashes to 60,
      # inside [5, 105); the exact bucket depends on the deterministic hash.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())
  def test_linear_model(self):
    """linear_model sums the weight rows selected by the looked-up ids."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    # 3 vocabulary entries + 1 OOV bucket = 4 buckets.
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        # Weights and bias are zero-initialized, so predictions start at 0.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
  def test_keras_linear_model(self):
    """Keras linear model mirrors the fc.linear_model behavior above."""
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    # 3 vocabulary entries + 1 OOV bucket = 4 buckets.
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        # Weights and bias are zero-initialized, so predictions start at 0.
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), wire_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), predictions.eval())
class IdentityCategoricalColumnTest(test.TestCase):
  """Tests for categorical_column_with_identity.

  An identity column treats integer inputs in [0, num_buckets) directly as
  category ids; `default_value`, when given, replaces out-of-range ids.
  """

  def test_constructor(self):
    """Constructor exposes name/key/scope/buckets and an int64 parse spec."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column._parse_example_spec)

  def test_key_should_be_string(self):
    """A non-string key is rejected at construction."""
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)

  def test_deep_copy(self):
    """copy.deepcopy preserves all column properties."""
    original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, column._parse_example_spec)

  def test_invalid_num_buckets_zero(self):
    """num_buckets must be at least 1."""
    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=0)

  def test_invalid_num_buckets_negative(self):
    """Negative num_buckets is rejected."""
    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=-1)

  def test_invalid_default_value_too_small(self):
    """default_value must lie within [0, num_buckets)."""
    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=-1)

  def test_invalid_default_value_too_big(self):
    """default_value equal to num_buckets is out of range."""
    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=3)

  def test_invalid_input_dtype(self):
    """String inputs are rejected; identity columns require integers."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  def test_parse_example(self):
    """parse_example returns the raw int64 values for an identity column."""
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([11, 21], dtype=np.int64),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  def test_get_sparse_tensors(self):
    """In-range int inputs pass through unchanged as int64 ids."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  def test_transform_feature(self):
    """_transform_features yields the same identity mapping."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_tensor.eval())

  def test_get_sparse_tensors_weight_collections(self):
    """The identity mapping creates no variables in any collection."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  def test_get_sparse_tensors_dense_input(self):
    """Dense int input is sparsified; the -1 entry is treated as missing."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': ((0, -1), (1, 0))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # Note index (0, 1) is absent: the -1 input was dropped, not mapped.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())

  def test_get_sparse_tensors_with_inputs_too_small(self):
    """Without default_value, a negative id fails at run time, not build time."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(
          errors.OpError, 'assert_greater_or_equal_0'):
        id_weight_pair.id_tensor.eval()

  def test_get_sparse_tensors_with_inputs_too_big(self):
    """Without default_value, an id >= num_buckets fails at run time."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, 99, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(
          errors.OpError, 'assert_less_than_num_buckets'):
        id_weight_pair.id_tensor.eval()

  def test_get_sparse_tensors_with_default_value(self):
    """Out-of-range ids (low or high) are replaced by default_value."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 99),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      # -1 and 99 are both out of [0, 4) -> default_value 3.
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
    """default_value substitution also works with fully dynamic inputs."""
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int32)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    inputs = sparse_tensor.SparseTensorValue(
        indices=input_indices,
        values=input_values,
        dense_shape=input_shape)
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=np.array((2, 2), dtype=np.int64)),
          id_weight_pair.id_tensor.eval(feed_dict={
              input_indices: ((0, 0), (1, 0), (1, 1)),
              input_values: (1, -1, 99),
              input_shape: (2, 2),
          }))

  def test_linear_model(self):
    """linear_model sums the weight rows selected by the identity ids."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=(0, 2, 1),
              dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] = 1
        # weight_var[2] + weight_var[1] = 3+2 = 5
        self.assertAllClose(((1.,), (5.,)), predictions.eval())

  def test_keras_linear_model(self):
    """Keras linear model mirrors the fc.linear_model behavior above."""
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        self.assertAllClose((0.,), bias.eval())
        self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
        self.assertAllClose(((0.,), (0.,)), predictions.eval())
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] = 1
        # weight_var[2] + weight_var[1] = 3+2 = 5
        self.assertAllClose(((1.,), (5.,)), predictions.eval())
class TransformFeaturesTest(test.TestCase):
  """Tests for `_transform_features`.

  Per-column transform behavior is covered in each column's own test class;
  this class only covers the multi-column case and transform call ordering.
  """

  def test_multi_column(self):
    """Transforms a dense and a sparse column together in one call."""
    # BUG FIX: this method was named `transform_multi_column` (missing the
    # `test_` prefix), so unittest discovery silently never ran it.
    bucketized_price = fc.bucketized_column(
        fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    with ops.Graph().as_default():
      features = {
          'price': [[-1.], [5.]],
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [1, 1]],
                  dense_shape=[2, 2])
      }
      transformed = _transform_features(features,
                                        [bucketized_price, hashed_sparse])
      with _initialized_session():
        # The transformed tensor names embed the originating column names.
        self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
        # -1. falls below boundary 0 -> bucket 0; 5. is in [4, 6) -> bucket 3.
        self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
        self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
        # Exact bucket ids depend on the deterministic string hash.
        self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())

  def test_column_order(self):
    """Columns are transformed in a deterministic (name-sorted) order.

    Regardless of the order the columns are passed in, the column named '1'
    is always transformed before the column named '2'.
    """
    # NOTE: the original docstring here ("When the column is both dense and
    # sparse, uses sparse tensors.") described a different test and was wrong.

    class _LoggerColumn(_FeatureColumn):
      """Stub column that records the order in which it was transformed."""

      def __init__(self, name):
        self._name = name

      @property
      def name(self):
        return self._name

      def _transform_feature(self, inputs):
        del inputs
        # Record our position in the global call sequence.
        self.call_order = call_logger['count']
        call_logger['count'] += 1
        return 'Anything'

      @property
      def _parse_example_spec(self):
        pass

    with ops.Graph().as_default():
      column1 = _LoggerColumn('1')
      column2 = _LoggerColumn('2')
      call_logger = {'count': 0}
      _transform_features({}, [column1, column2])
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)
      call_logger = {'count': 0}
      # Even with the argument order reversed, '1' is still transformed first.
      _transform_features({}, [column2, column1])
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)
class IndicatorColumnTest(test.TestCase):
  """Tests for indicator_column (multi-hot encoding of categorical ids)."""

  def test_indicator_column(self):
    """Indicator column derives name/scope/shape from its categorical column."""
    a = fc.categorical_column_with_hash_bucket('a', 4)
    indicator_a = fc.indicator_column(a)
    self.assertEqual(indicator_a.categorical_column.name, 'a')
    self.assertEqual(indicator_a.name, 'a_indicator')
    self.assertEqual(indicator_a._var_scope_name, 'a_indicator')
    self.assertEqual(indicator_a._variable_shape, [1, 4])
    b = fc.categorical_column_with_hash_bucket('b', hash_bucket_size=100)
    indicator_b = fc.indicator_column(b)
    self.assertEqual(indicator_b.categorical_column.name, 'b')
    self.assertEqual(indicator_b.name, 'b_indicator')
    self.assertEqual(indicator_b._var_scope_name, 'b_indicator')
    self.assertEqual(indicator_b._variable_shape, [1, 100])

  def test_1D_shape_succeeds(self):
    """A 1-D dense input produces one one-hot row per example."""
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({'animal': ['fox', 'fox']})
    output = builder.get(animal)
    with self.test_session():
      # 'fox' hashes to bucket 2 (hash-dependent).
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())

  def test_2D_shape_succeeds(self):
    """A 2-D sparse input produces the same per-example one-hot rows."""
    # TODO(ispir/cassandrax): Swith to categorical_column_with_keys when ready.
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0]],
                values=['fox', 'fox'],
                dense_shape=[2, 1])
    })
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]], output.eval())

  def test_multi_hot(self):
    """Repeated ids within one example sum in the indicator output."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.test_session():
      # id 1 appears twice -> count 2 at position 1.
      self.assertAllEqual([[0., 2., 0., 0.]], output.eval())

  def test_multi_hot2(self):
    """Distinct ids within one example each contribute a 1."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.test_session():
      self.assertAllEqual([[0., 1., 1., 0.]], output.eval())

  def test_deep_copy(self):
    """copy.deepcopy preserves the wrapped column and indicator shape."""
    a = fc.categorical_column_with_hash_bucket('a', 4)
    column = fc.indicator_column(a)
    column_copy = copy.deepcopy(column)
    self.assertEqual(column_copy.categorical_column.name, 'a')
    self.assertEqual(column.name, 'a_indicator')
    self.assertEqual(column._variable_shape, [1, 4])

  def test_parse_example(self):
    """Parse spec of an indicator column is that of its categorical column."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_indicator]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  def test_transform(self):
    """Transform produces a dense multi-hot tensor; OOV rows stay zero."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    features = {
        'aaa': sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=('marlo', 'skywalker', 'omar'),
            dense_shape=(2, 2))
    }
    indicator_tensor = _transform_features(features, [a_indicator])[a_indicator]
    with _initialized_session():
      # Row 0: 'marlo' -> index 2.  Row 1: 'skywalker' is OOV (no
      # contribution), 'omar' -> index 0.
      self.assertAllEqual([[0, 0, 1], [1, 0, 0]], indicator_tensor.eval())

  def test_transform_with_weighted_column(self):
    """Weighted categorical ids place their weights in the indicator slots."""
    # Github issue 12557
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'a']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      # 'a' (id 0) carries 6., 'b' (id 1) carries 4., 'c' (id 2) carries 2.
      self.assertAllEqual([[6., 4., 2.]], indicator_tensor.eval())

  def test_transform_with_missing_value_in_weighted_column(self):
    """An OOV id in a weighted column contributes nothing, not an error."""
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      # 'unknown' is dropped; 'b' -> 4. at id 1, 'c' -> 2. at id 2.
      self.assertAllEqual([[0., 4., 2.]], indicator_tensor.eval())

  def test_transform_with_missing_value_in_categorical_column(self):
    """An OOV id in a plain categorical column contributes nothing."""
    # Github issue 12583
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    indicator = fc.indicator_column(ids)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[0., 1., 1.]], indicator_tensor.eval())

  def test_linear_model(self):
    """linear_model treats the indicator output as dense weights input."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = fc.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      with _initialized_session():
        # All should be zero-initialized.
        self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
        self.assertAllClose([[0.]], predictions.eval())
        weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
        # ids 1 and 2 are hot -> weights 2. + 3.
        self.assertAllClose([[2. + 3.]], predictions.eval())

  def test_keras_linear_model(self):
    """Keras linear model mirrors the fc.linear_model behavior above."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = get_keras_linear_model_predictions(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      with _initialized_session():
        # All should be zero-initialized.
        self.assertAllClose([[0.], [0.], [0.], [0.]], weight_var.eval())
        self.assertAllClose([[0.]], predictions.eval())
        weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
        # ids 1 and 2 are hot -> weights 2. + 3.
        self.assertAllClose([[2. + 3.]], predictions.eval())

  def test_input_layer(self):
    """input_layer emits the multi-hot vector for each example."""
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc.input_layer(features, [animal])
      with _initialized_session():
        self.assertAllClose([[0., 1., 1., 0.]], net.eval())
class EmbeddingColumnTest(test.TestCase):
  def test_defaults(self):
    """Embedding column defaults: mean combiner, trainable, no checkpoint."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('mean', embedding_column.combiner)
    self.assertIsNone(embedding_column.ckpt_to_load_from)
    self.assertIsNone(embedding_column.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column.max_norm)
    self.assertTrue(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
    self.assertEqual(
        (embedding_dimension,), embedding_column._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)
  def test_all_constructor_args(self):
    """Every constructor argument is stored verbatim on the column."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        combiner='my_combiner', initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42., trainable=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column.max_norm)
    self.assertFalse(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
    self.assertEqual(
        (embedding_dimension,), embedding_column._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)
  def test_deep_copy(self):
    """copy.deepcopy preserves every embedding-column property."""
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    original = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        combiner='my_combiner', initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42., trainable=False)
    for embedding_column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', embedding_column.categorical_column.name)
      self.assertEqual(3, embedding_column.categorical_column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.categorical_column._parse_example_spec)
      self.assertEqual(embedding_dimension, embedding_column.dimension)
      self.assertEqual('my_combiner', embedding_column.combiner)
      self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column.max_norm)
      self.assertFalse(embedding_column.trainable)
      self.assertEqual('aaa_embedding', embedding_column.name)
      self.assertEqual(
          (embedding_dimension,), embedding_column._variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column._parse_example_spec)
def test_invalid_initializer(self):
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.embedding_column(categorical_column, dimension=2, initializer='not_fn')
  def test_parse_example(self):
    """Parse spec of an embedding column is that of its categorical column."""
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_embedded = fc.embedding_column(a, dimension=2)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_embedded]))
    self.assertIn('aaa', features)
    with self.test_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
  def test_transform_feature(self):
    """Transforming the embedding column yields the categorical column's ids."""
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    a_embedded = fc.embedding_column(a, dimension=2)
    features = {
        'aaa': sparse_tensor.SparseTensor(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(0, 1, 0),
            dense_shape=(2, 2))
    }
    outputs = _transform_features(features, [a, a_embedded])
    output_a = outputs[a]
    output_embedded = outputs[a_embedded]
    with _initialized_session():
      # Both transforms should produce the same sparse id tensor.
      _assert_sparse_tensor_value(
          self, output_a.eval(), output_embedded.eval())
  def test_get_dense_tensor(self):
    """Dense lookup mean-combines embeddings per example; empty rows are 0."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    def _initializer(shape, dtype, partition_info):
      # Verify the initializer is called with the expected variable spec.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval())
  def test_get_dense_tensor_3d(self):
    """Rank-3 sparse input yields one combined embedding per inner row."""
    # Inputs.
    vocabulary_size = 4
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
        values=(2, 0, 1, 1, 2),
        dense_shape=(4, 2, 5))
    # Embedding variable.
    embedding_dimension = 3
    embedding_values = (
        (1., 2., 4.),   # id 0
        (3., 5., 1.),   # id 1
        (7., 11., 2.),  # id 2
        (2., 7., 12.)   # id 3
    )
    def _initializer(shape, dtype, partition_info):
      # Verify the initializer is called with the expected variable spec.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
        ((7., 11., 2.), (0., 0., 0.)),
        # example 1, ids [[], [0, 1]], embedding
        # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
        ((0., 0., 0.), (2., 3.5, 2.5)),
        # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
        ((0., 0., 0.), (0., 0., 0.)),
        # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
        ((3., 5., 1.), (7., 11., 2.)),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval())
def test_get_dense_tensor_weight_collections(self):
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
embedding_column = fc.embedding_column(categorical_column, dimension=2)
# Provide sparse input and get dense result.
embedding_column._get_dense_tensor(
_LazyBuilder({
'aaa': sparse_input
}), weight_collections=('my_vars',))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
my_vars = ops.get_collection('my_vars')
self.assertItemsEqual(
('embedding_weights:0',), tuple([v.name for v in my_vars]))
  def test_get_dense_tensor_placeholder_inputs(self):
    """Lookup works when the sparse input components are fed via placeholders."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable: a fixed table so lookups can be checked exactly.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    # Returns the fixed table after verifying the requested variable spec.
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    # Provide sparse input and get dense result. The SparseTensorValue is
    # built from placeholders so the actual ids are supplied at run time.
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int64)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa':
                sparse_tensor.SparseTensorValue(
                    indices=input_indices,
                    values=input_values,
                    dense_shape=input_shape)
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval(
          feed_dict={
              input_indices: sparse_input.indices,
              input_values: sparse_input.values,
              input_shape: sparse_input.dense_shape,
          }))
  def test_get_dense_tensor_restore_from_ckpt(self):
    """Embedding weights are initialized from a checkpoint tensor, not an initializer."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable. The checkpoint file contains _embedding_values.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    # Test fixture checkpoint holding the table above under `ckpt_tensor`.
    ckpt_path = test.test_src_dir_path(
        'python/feature_column/testdata/embedding.ckpt')
    ckpt_tensor = 'my_embedding'
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns. No initializer: the variable is loaded from the ckpt.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        ckpt_to_load_from=ckpt_path,
        tensor_name_in_ckpt=ckpt_tensor)
    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa': sparse_input
        }))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('embedding_weights:0',), tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, embedding_lookup.eval())
  def test_linear_model(self):
    """Linear model over an embedding column: output = embeddings · weights + bias."""
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))
    # Embedding variable. All-zero initial embeddings make the initial
    # predictions exactly zero, which is asserted first.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension,
        initializer=_initializer)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      # Three variables: model bias, per-column linear weights, and the
      # embedding table itself.
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v for v in ops.get_collection(
              ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars[
          'linear_model/aaa_embedding/weights:0']
      with _initialized_session():
        # Predictions with all zero weights.
        self.assertAllClose(np.zeros((1,)), bias.eval())
        self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
        self.assertAllClose(
            np.zeros((embedding_dimension, 1)), linear_weights.eval())
        self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
        # Predictions with all non-zero weights.
        embedding_weights.assign((
            (1., 2.),  # id 0
            (3., 5.),  # id 1
            (7., 11.)  # id 2
        )).eval()
        linear_weights.assign(((4.,), (6.,))).eval()
        # example 0, ids [2], embedding[0] = [7, 11]
        # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
        # example 2, ids [], embedding[2] = [0, 0]
        # example 3, ids [1], embedding[3] = [3, 5]
        # sum(embeddings * linear_weights)
        # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
        self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
  def test_keras_linear_model(self):
    """Keras-style linear model produces the same results as test_linear_model."""
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))
    # Embedding variable. All-zero initial embeddings make the initial
    # predictions exactly zero, which is asserted first.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values
    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      # Three variables: model bias, per-column linear weights, and the
      # embedding table itself.
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
      with _initialized_session():
        # Predictions with all zero weights.
        self.assertAllClose(np.zeros((1,)), bias.eval())
        self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
        self.assertAllClose(
            np.zeros((embedding_dimension, 1)), linear_weights.eval())
        self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
        # Predictions with all non-zero weights.
        embedding_weights.assign((
            (1., 2.),  # id 0
            (3., 5.),  # id 1
            (7., 11.)  # id 2
        )).eval()
        linear_weights.assign(((4.,), (6.,))).eval()
        # example 0, ids [2], embedding[0] = [7, 11]
        # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
        # example 2, ids [], embedding[2] = [0, 0]
        # example 3, ids [1], embedding[3] = [3, 5]
        # sum(embeddings * linear_weights)
        # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
        self.assertAllClose(((94.,), (29.,), (0.,), (42.,)), predictions.eval())
def test_input_layer(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in trainable_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, trainable_vars[0].eval())
self.assertAllEqual(expected_lookups, input_layer.eval())
def test_input_layer_not_trainable(self):
# Inputs.
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 4), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 5))
# Embedding variable.
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
# Expected lookup result, using combiner='mean'.
expected_lookups = (
# example 0, ids [2], embedding = [7, 11]
(7., 11.),
# example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
(2., 3.5),
# example 2, ids [], embedding = [0, 0]
(0., 0.),
# example 3, ids [1], embedding = [3, 5]
(3., 5.),
)
# Build columns.
categorical_column = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer, trainable=False)
# Provide sparse input and get dense result.
input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))
# Assert expected embedding variable and lookups.
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(
('input_layer/aaa_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
self.assertItemsEqual(
[], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, input_layer.eval())
class SharedEmbeddingColumnTest(test.TestCase):
def test_defaults(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_b, embedding_column_a = fc.shared_embedding_columns(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('mean', embedding_column_a.combiner)
self.assertEqual('mean', embedding_column_b.combiner)
self.assertIsNone(embedding_column_a.ckpt_to_load_from)
self.assertIsNone(embedding_column_b.ckpt_to_load_from)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('aaa_bbb_shared_embedding',
embedding_column_b.shared_embedding_collection_name)
self.assertIsNone(embedding_column_a.tensor_name_in_ckpt)
self.assertIsNone(embedding_column_b.tensor_name_in_ckpt)
self.assertIsNone(embedding_column_a.max_norm)
self.assertIsNone(embedding_column_b.max_norm)
self.assertTrue(embedding_column_a.trainable)
self.assertTrue(embedding_column_b.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual(
'aaa_bbb_shared_embedding', embedding_column_a._var_scope_name)
self.assertEqual(
'aaa_bbb_shared_embedding', embedding_column_b._var_scope_name)
self.assertEqual(
(embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual(
(embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
def test_all_constructor_args(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42.,
trainable=False)
self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual(embedding_dimension, embedding_column_b.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('my_combiner', embedding_column_b.combiner)
self.assertEqual('shared_embedding_collection_name',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('shared_embedding_collection_name',
embedding_column_b.shared_embedding_collection_name)
self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
self.assertEqual('my_ckpt', embedding_column_b.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
self.assertEqual('my_ckpt_tensor', embedding_column_b.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertEqual(42., embedding_column_b.max_norm)
self.assertFalse(embedding_column_a.trainable)
self.assertFalse(embedding_column_b.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
self.assertEqual(
'shared_embedding_collection_name', embedding_column_a._var_scope_name)
self.assertEqual(
'shared_embedding_collection_name', embedding_column_b._var_scope_name)
self.assertEqual(
(embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual(
(embedding_dimension,), embedding_column_b._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
self.assertEqual({
'bbb': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_b._parse_example_spec)
def test_deep_copy(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
embedding_dimension = 2
original_a, _ = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
combiner='my_combiner',
initializer=lambda: 'my_initializer',
shared_embedding_collection_name='shared_embedding_collection_name',
ckpt_to_load_from='my_ckpt',
tensor_name_in_ckpt='my_ckpt_tensor',
max_norm=42., trainable=False)
for embedding_column_a in (original_a, copy.deepcopy(original_a)):
self.assertEqual('aaa', embedding_column_a.categorical_column.name)
self.assertEqual(3, embedding_column_a.categorical_column._num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a.categorical_column._parse_example_spec)
self.assertEqual(embedding_dimension, embedding_column_a.dimension)
self.assertEqual('my_combiner', embedding_column_a.combiner)
self.assertEqual('shared_embedding_collection_name',
embedding_column_a.shared_embedding_collection_name)
self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
self.assertEqual(42., embedding_column_a.max_norm)
self.assertFalse(embedding_column_a.trainable)
self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
self.assertEqual(
(embedding_dimension,), embedding_column_a._variable_shape)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, embedding_column_a._parse_example_spec)
def test_invalid_initializer(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2,
initializer='not_fn')
def test_incompatible_column_type(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
categorical_column_c = fc.categorical_column_with_hash_bucket(
key='ccc', hash_bucket_size=3)
with self.assertRaisesRegexp(
ValueError,
'all categorical_columns must have the same type.*'
'_IdentityCategoricalColumn.*_HashedCategoricalColumn'):
fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b, categorical_column_c],
dimension=2)
def test_weighted_categorical_column_ok(self):
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
weighted_categorical_column_a = fc.weighted_categorical_column(
categorical_column_a, weight_feature_key='aaa_weights')
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
weighted_categorical_column_b = fc.weighted_categorical_column(
categorical_column_b, weight_feature_key='bbb_weights')
fc.shared_embedding_columns(
[weighted_categorical_column_a, categorical_column_b], dimension=2)
fc.shared_embedding_columns(
[categorical_column_a, weighted_categorical_column_b], dimension=2)
fc.shared_embedding_columns(
[weighted_categorical_column_a, weighted_categorical_column_b],
dimension=2)
def test_parse_example(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
b = fc.categorical_column_with_vocabulary_list(
key='bbb', vocabulary_list=('omar', 'stringer', 'marlo'))
a_embedded, b_embedded = fc.shared_embedding_columns(
[a, b], dimension=2)
data = example_pb2.Example(features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer'])),
'bbb':
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=[b'stringer', b'marlo'])),
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec([a_embedded, b_embedded]))
self.assertIn('aaa', features)
self.assertIn('bbb', features)
with self.test_session():
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]),
features['aaa'].eval())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'stringer', b'marlo'], dtype=np.object_),
dense_shape=[1, 2]),
features['bbb'].eval())
def test_transform_feature(self):
a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
b = fc.categorical_column_with_identity(key='bbb', num_buckets=3)
a_embedded, b_embedded = fc.shared_embedding_columns(
[a, b], dimension=2)
features = {
'aaa': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb': sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
outputs = _transform_features(features, [a, a_embedded, b, b_embedded])
output_a = outputs[a]
output_a_embedded = outputs[a_embedded]
output_b = outputs[b]
output_b_embedded = outputs[b_embedded]
with _initialized_session():
_assert_sparse_tensor_value(
self, output_a.eval(), output_a_embedded.eval())
_assert_sparse_tensor_value(
self, output_b.eval(), output_b_embedded.eval())
  def test_get_dense_tensor(self):
    """Both shared columns look up through one common embedding variable."""
    # Inputs.
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array(
        [[2, -1, -1],  # example 0, ids [2]
         [0, 1, -1]])  # example 1, ids [0, 1]
    input_b = np.array(
        [[0, -1, -1],  # example 0, ids [0]
         [-1, -1, -1]])  # example 1, ids []
    input_features = {
        'aaa': input_a,
        'bbb': input_b
    }
    # Embedding variable: a fixed table so lookups can be checked exactly.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    # Returns the fixed table after verifying the requested variable spec.
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups_a = (
        # example 0:
        (7., 11.),  # ids [2], embedding = [7, 11]
        # example 1:
        (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
    )
    expected_lookups_b = (
        # example 0:
        (1., 2.),  # ids [0], embedding = [1, 2]
        # example 1:
        (0., 0.),  # ids [], embedding = [0, 0]
    )
    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension, initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup_a = embedding_column_a._get_dense_tensor(
        _LazyBuilder(input_features))
    embedding_lookup_b = embedding_column_b._get_dense_tensor(
        _LazyBuilder(input_features))
    # Assert expected embedding variable and lookups. Only ONE embedding
    # variable exists even though two columns perform lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    embedding_var = global_vars[0]
    with _initialized_session():
      self.assertAllEqual(embedding_values, embedding_var.eval())
      self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
      self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
  def test_get_dense_tensor_weight_collections(self):
    """input_layer places the shared embedding variable in the custom collection."""
    # Inputs.
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array([
        [2, -1, -1],  # example 0, ids [2]
        [0, 1, -1]
    ])  # example 1, ids [0, 1]
    input_b = np.array([
        [0, -1, -1],  # example 0, ids [0]
        [-1, -1, -1]
    ])  # example 1, ids []
    input_features = {'aaa': input_a, 'bbb': input_b}
    # Embedding variable: a fixed table so lookups can be checked exactly.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    # Returns the fixed table after verifying the requested variable spec.
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    # NOTE(review): the lookup values are not asserted in this test; only
    # variable collection placement is verified below.
    expected_lookups_a = (
        # example 0:
        (7., 11.),  # ids [2], embedding = [7, 11]
        # example 1:
        (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
    )
    expected_lookups_b = (
        # example 0:
        (1., 2.),  # ids [0], embedding = [1, 2]
        # example 1:
        (0., 0.),  # ids [], embedding = [0, 0]
    )
    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer)
    fc.input_layer(
        input_features, [embedding_column_a, embedding_column_b],
        weight_collections=('my_vars',))
    # Assert expected embedding variable and lookups. The single shared
    # variable must be in both GLOBAL_VARIABLES and 'my_vars'.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
        tuple(v.name for v in global_vars))
    my_vars = ops.get_collection('my_vars')
    self.assertItemsEqual(
        ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',),
        tuple(v.name for v in my_vars))
  def test_get_dense_tensor_placeholder_inputs(self):
    """Shared embedding lookups run when dense inputs are fed via placeholders."""
    # Inputs.
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array(
        [[2, -1, -1],  # example 0, ids [2]
         [0, 1, -1]])  # example 1, ids [0, 1]
    input_b = np.array(
        [[0, -1, -1],  # example 0, ids [0]
         [-1, -1, -1]])  # example 1, ids []
    # Specify shape, because dense input must have rank specified.
    input_a_placeholder = array_ops.placeholder(
        dtype=dtypes.int64, shape=[None, 3])
    input_b_placeholder = array_ops.placeholder(
        dtype=dtypes.int64, shape=[None, 3])
    input_features = {
        'aaa': input_a_placeholder,
        'bbb': input_b_placeholder,
    }
    feed_dict = {
        input_a_placeholder: input_a,
        input_b_placeholder: input_b,
    }
    # Embedding variable: a fixed table so initialization is deterministic.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    # Returns the fixed table after verifying the requested variable spec.
    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension, initializer=_initializer)
    # Provide sparse input and get dense result.
    embedding_lookup_a = embedding_column_a._get_dense_tensor(
        _LazyBuilder(input_features))
    embedding_lookup_b = embedding_column_b._get_dense_tensor(
        _LazyBuilder(input_features))
    # Only checks that the lookups run cleanly with fed inputs; the output
    # values themselves are not asserted here.
    with _initialized_session() as sess:
      sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict)
def test_linear_model(self):
    """Tests fc.linear_model with two columns sharing one embedding table.

    Builds a linear model over two identity categorical columns that share a
    single embedding variable, then checks the created variable names,
    all-zero initial predictions, and predictions after assigning known
    embedding and linear weights.
    """
    # Inputs.
    batch_size = 2
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array(
        [[2, -1, -1],  # example 0, ids [2]
         [0, 1, -1]])  # example 1, ids [0, 1]
    input_b = np.array(
        [[0, -1, -1],  # example 0, ids [0]
         [-1, -1, -1]])  # example 1, ids []
    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
        # The initializer doubles as an assertion that the shared embedding
        # variable is created with the expected shape/dtype, unpartitioned.
        self.assertAllEqual(embedding_shape, shape)
        self.assertEqual(dtypes.float32, dtype)
        self.assertIsNone(partition_info)
        return zeros_embedding_values

    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension, initializer=_initializer)
    with ops.Graph().as_default():
        predictions = fc.linear_model({
            categorical_column_a.name: input_a,
            categorical_column_b.name: input_b,
        }, (embedding_column_a, embedding_column_b))
        # Linear weights do not follow the column name. But this is a rare use
        # case, and fixing it would add too much complexity to the code.
        expected_var_names = (
            'linear_model/bias_weights:0',
            'linear_model/aaa_bbb_shared_embedding/weights:0',
            'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
            'linear_model/aaa_bbb_shared_embedding_1/weights:0',
        )
        self.assertItemsEqual(
            expected_var_names,
            [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
        trainable_vars = {
            v.name: v for v in ops.get_collection(
                ops.GraphKeys.TRAINABLE_VARIABLES)
        }
        self.assertItemsEqual(expected_var_names, trainable_vars.keys())
        bias = trainable_vars['linear_model/bias_weights:0']
        embedding_weights = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
        linear_weights_a = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding/weights:0']
        linear_weights_b = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding_1/weights:0']
        with _initialized_session():
            # Predictions with all zero weights.
            self.assertAllClose(np.zeros((1,)), bias.eval())
            self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
            self.assertAllClose(
                np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
            self.assertAllClose(
                np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
            self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
            # Predictions with all non-zero weights.
            embedding_weights.assign((
                (1., 2.),  # id 0
                (3., 5.),  # id 1
                (7., 11.)  # id 2
            )).eval()
            linear_weights_a.assign(((4.,), (6.,))).eval()
            # example 0, ids [2], embedding[0] = [7, 11]
            # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
            # sum(embeddings * linear_weights)
            # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
            linear_weights_b.assign(((3.,), (5.,))).eval()
            # example 0, ids [0], embedding[0] = [1, 2]
            # example 1, ids [], embedding[1] = [0, 0]
            # sum(embeddings * linear_weights)
            # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
            self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
def test_keras_linear_model(self):
    """Same scenario as test_linear_model, but through the Keras-style
    get_keras_linear_model_predictions entry point: shared-embedding
    variable names, zero-weight predictions, and predictions after
    assigning known weights.
    """
    # Inputs.
    batch_size = 2
    vocabulary_size = 3
    # -1 values are ignored.
    input_a = np.array([
        [2, -1, -1],  # example 0, ids [2]
        [0, 1, -1]
    ])  # example 1, ids [0, 1]
    input_b = np.array([
        [0, -1, -1],  # example 0, ids [0]
        [-1, -1, -1]
    ])  # example 1, ids []
    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
        # Asserts the shared embedding variable is created with the
        # expected shape/dtype, unpartitioned.
        self.assertAllEqual(embedding_shape, shape)
        self.assertEqual(dtypes.float32, dtype)
        self.assertIsNone(partition_info)
        return zeros_embedding_values

    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer)
    with ops.Graph().as_default():
        predictions = get_keras_linear_model_predictions({
            categorical_column_a.name: input_a,
            categorical_column_b.name: input_b,
        }, (embedding_column_a, embedding_column_b))
        # Linear weights do not follow the column name. But this is a rare use
        # case, and fixing it would add too much complexity to the code.
        expected_var_names = (
            'linear_model/bias_weights:0',
            'linear_model/aaa_bbb_shared_embedding/weights:0',
            'linear_model/aaa_bbb_shared_embedding/embedding_weights:0',
            'linear_model/aaa_bbb_shared_embedding_1/weights:0',
        )
        self.assertItemsEqual(
            expected_var_names,
            [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
        trainable_vars = {
            v.name: v
            for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        }
        self.assertItemsEqual(expected_var_names, trainable_vars.keys())
        bias = trainable_vars['linear_model/bias_weights:0']
        embedding_weights = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding/embedding_weights:0']
        linear_weights_a = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding/weights:0']
        linear_weights_b = trainable_vars[
            'linear_model/aaa_bbb_shared_embedding_1/weights:0']
        with _initialized_session():
            # Predictions with all zero weights.
            self.assertAllClose(np.zeros((1,)), bias.eval())
            self.assertAllClose(zeros_embedding_values, embedding_weights.eval())
            self.assertAllClose(
                np.zeros((embedding_dimension, 1)), linear_weights_a.eval())
            self.assertAllClose(
                np.zeros((embedding_dimension, 1)), linear_weights_b.eval())
            self.assertAllClose(np.zeros((batch_size, 1)), predictions.eval())
            # Predictions with all non-zero weights.
            embedding_weights.assign((
                (1., 2.),  # id 0
                (3., 5.),  # id 1
                (7., 11.)  # id 2
            )).eval()
            linear_weights_a.assign(((4.,), (6.,))).eval()
            # example 0, ids [2], embedding[0] = [7, 11]
            # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
            # sum(embeddings * linear_weights)
            # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29]
            linear_weights_b.assign(((3.,), (5.,))).eval()
            # example 0, ids [0], embedding[0] = [1, 2]
            # example 1, ids [], embedding[1] = [0, 0]
            # sum(embeddings * linear_weights)
            # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0]
            self.assertAllClose([[94. + 13.], [29.]], predictions.eval())
def _test_input_layer(self, trainable=True):
    """Shared harness: fc.input_layer over two columns sharing one embedding.

    Args:
      trainable: whether the shared embedding variable is expected to be
        added to the TRAINABLE_VARIABLES collection.
    """
    # Inputs.
    vocabulary_size = 3
    sparse_input_a = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 4)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_b = sparse_tensor.SparseTensorValue(
        # example 0, ids [0]
        # example 1, ids []
        indices=((0, 0),),
        values=(0,),
        dense_shape=(2, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
        # Asserts the shared embedding variable is created with the
        # expected shape/dtype, unpartitioned.
        self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
        self.assertEqual(dtypes.float32, dtype)
        self.assertIsNone(partition_info)
        return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0:
        # A ids [2], embedding = [7, 11]
        # B ids [0], embedding = [1, 2]
        (7., 11., 1., 2.),
        # example 1:
        # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        # B ids [], embedding = [0, 0]
        (2., 3.5, 0., 0.),
    )
    # Build columns.
    categorical_column_a = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = fc.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension, initializer=_initializer,
        trainable=trainable)
    # Provide sparse input and get dense result.
    input_layer = fc.input_layer(
        features={'aaa': sparse_input_a, 'bbb': sparse_input_b},
        feature_columns=(embedding_column_b, embedding_column_a))
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
        tuple([v.name for v in global_vars]))
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    if trainable:
        self.assertItemsEqual(
            ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
            tuple([v.name for v in trainable_vars]))
    else:
        self.assertItemsEqual([], tuple([v.name for v in trainable_vars]))
    shared_embedding_vars = global_vars
    with _initialized_session():
        self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval())
        self.assertAllEqual(expected_lookups, input_layer.eval())
def test_input_layer(self):
    """input_layer with a trainable shared embedding (the default)."""
    self._test_input_layer()
def test_input_layer_no_trainable(self):
    """input_layer with a non-trainable shared embedding."""
    self._test_input_layer(trainable=False)
class WeightedCategoricalColumnTest(test.TestCase):
    """Tests for fc.weighted_categorical_column."""

    def test_defaults(self):
        """Checks name, var-scope name, bucket count and parse spec."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        self.assertEqual('ids_weighted_by_values', column.name)
        self.assertEqual('ids_weighted_by_values', column._var_scope_name)
        self.assertEqual(3, column._num_buckets)
        self.assertEqual({
            'ids': parsing_ops.VarLenFeature(dtypes.int64),
            'values': parsing_ops.VarLenFeature(dtypes.float32)
        }, column._parse_example_spec)

    def test_deep_copy(self):
        """Tests deepcopy of weighted_categorical_column."""
        original = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        # A deep copy must preserve name, bucket count and parse spec.
        for column in (original, copy.deepcopy(original)):
            self.assertEqual('ids_weighted_by_values', column.name)
            self.assertEqual(3, column._num_buckets)
            self.assertEqual({
                'ids': parsing_ops.VarLenFeature(dtypes.int64),
                'values': parsing_ops.VarLenFeature(dtypes.float32)
            }, column._parse_example_spec)

    def test_invalid_dtype_none(self):
        """A None weight dtype must be rejected at column construction."""
        with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
            fc.weighted_categorical_column(
                categorical_column=fc.categorical_column_with_identity(
                    key='ids', num_buckets=3),
                weight_feature_key='values',
                dtype=None)

    def test_invalid_dtype_string(self):
        """A string weight dtype must be rejected at column construction."""
        with self.assertRaisesRegexp(ValueError, 'is not convertible to float'):
            fc.weighted_categorical_column(
                categorical_column=fc.categorical_column_with_identity(
                    key='ids', num_buckets=3),
                weight_feature_key='values',
                dtype=dtypes.string)

    def test_invalid_input_dtype(self):
        """String-valued weights must be rejected at transform time."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        strings = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=('omar', 'stringer', 'marlo'),
            dense_shape=(2, 2))
        with self.assertRaisesRegexp(ValueError, 'Bad dtype'):
            _transform_features({'ids': strings, 'values': strings}, (column,))

    def test_column_name_collision(self):
        """Weight key colliding with the id key must raise."""
        with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'):
            fc.weighted_categorical_column(
                categorical_column=fc.categorical_column_with_identity(
                    key='aaa', num_buckets=3),
                weight_feature_key='aaa')._parse_example_spec()

    def test_missing_weights(self):
        """Transform must fail when the weight feature is not provided."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        inputs = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=('omar', 'stringer', 'marlo'),
            dense_shape=(2, 2))
        with self.assertRaisesRegexp(
                ValueError, 'values is not in features dictionary'):
            _transform_features({'ids': inputs}, (column,))

    def test_parse_example(self):
        """parse_example round-trips both the ids and the weights."""
        a = fc.categorical_column_with_vocabulary_list(
            key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
        a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights')
        data = example_pb2.Example(features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                        value=[b'omar', b'stringer'])),
                'weights':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=[1., 10.]))
            }))
        features = parsing_ops.parse_example(
            serialized=[data.SerializeToString()],
            features=fc.make_parse_example_spec([a_weighted]))
        self.assertIn('aaa', features)
        self.assertIn('weights', features)
        with self.test_session():
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=[[0, 0], [0, 1]],
                    values=np.array([b'omar', b'stringer'], dtype=np.object_),
                    dense_shape=[1, 2]),
                features['aaa'].eval())
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=[[0, 0], [0, 1]],
                    values=np.array([1., 10.], dtype=np.float32),
                    dense_shape=[1, 2]),
                features['weights'].eval())

    def test_transform_features(self):
        """Transform yields an (id_tensor, weight_tensor) pair."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        inputs = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(0, 1, 0),
            dense_shape=(2, 2))
        weights = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(0.5, 1.0, 0.1),
            dense_shape=(2, 2))
        id_tensor, weight_tensor = _transform_features({
            'ids': inputs,
            'values': weights,
        }, (column,))[column]
        with _initialized_session():
            # Ids pass through cast to int64; weights cast to float32.
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=inputs.indices,
                    values=np.array(inputs.values, dtype=np.int64),
                    dense_shape=inputs.dense_shape),
                id_tensor.eval())
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=weights.indices,
                    values=np.array(weights.values, dtype=np.float32),
                    dense_shape=weights.dense_shape),
                weight_tensor.eval())

    def test_transform_features_dense_input(self):
        """Dense id input is converted to sparse, dropping -1 entries."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        weights = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(0.5, 1.0, 0.1),
            dense_shape=(2, 2))
        id_tensor, weight_tensor = _transform_features({
            'ids': ((0, -1), (1, 0)),
            'values': weights,
        }, (column,))[column]
        with _initialized_session():
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=np.array((0, 1, 0), dtype=np.int64),
                    dense_shape=(2, 2)),
                id_tensor.eval())
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=weights.indices,
                    values=np.array(weights.values, dtype=np.float32),
                    dense_shape=weights.dense_shape),
                weight_tensor.eval())

    def test_transform_features_dense_weights(self):
        """Dense weights are converted to sparse alongside sparse ids."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        inputs = sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=(2, 1, 0),
            dense_shape=(2, 2))
        id_tensor, weight_tensor = _transform_features({
            'ids': inputs,
            'values': ((.5, 0.), (1., .1)),
        }, (column,))[column]
        with _initialized_session():
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=inputs.indices,
                    values=np.array(inputs.values, dtype=np.int64),
                    dense_shape=inputs.dense_shape),
                id_tensor.eval())
            _assert_sparse_tensor_value(
                self,
                sparse_tensor.SparseTensorValue(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=np.array((.5, 1., .1), dtype=np.float32),
                    dense_shape=(2, 2)),
                weight_tensor.eval())

    def test_keras_linear_model(self):
        """Keras linear model applies the per-id weights to the ids."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = get_keras_linear_model_predictions({
                'ids':
                    sparse_tensor.SparseTensorValue(
                        indices=((0, 0), (1, 0), (1, 1)),
                        values=(0, 2, 1),
                        dense_shape=(2, 2)),
                'values':
                    sparse_tensor.SparseTensorValue(
                        indices=((0, 0), (1, 0), (1, 1)),
                        values=(.5, 1., .1),
                        dense_shape=(2, 2))
            }, (column,))
            bias = get_linear_model_bias()
            weight_var = get_linear_model_column_var(column)
            with _initialized_session():
                self.assertAllClose((0.,), bias.eval())
                self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
                self.assertAllClose(((0.,), (0.,)), predictions.eval())
                weight_var.assign(((1.,), (2.,), (3.,))).eval()
                # weight_var[0] * weights[0, 0] = 1 * .5 = .5
                # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
                # = 3*1 + 2*.1 = 3+.2 = 3.2
                self.assertAllClose(((.5,), (3.2,)), predictions.eval())

    def test_keras_linear_model_mismatched_shape(self):
        """More weight entries than id entries must raise at build time."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            with self.assertRaisesRegexp(ValueError,
                                         r'Dimensions.*are not compatible'):
                get_keras_linear_model_predictions({
                    'ids':
                        sparse_tensor.SparseTensorValue(
                            indices=((0, 0), (1, 0), (1, 1)),
                            values=(0, 2, 1),
                            dense_shape=(2, 2)),
                    'values':
                        sparse_tensor.SparseTensorValue(
                            indices=((0, 0), (0, 1), (1, 0), (1, 1)),
                            values=(.5, 11., 1., .1),
                            dense_shape=(2, 2))
                }, (column,))

    def test_keras_linear_model_mismatched_dense_values(self):
        """Dense weights with too few rows fail at run time, not build time."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = get_keras_linear_model_predictions(
                {
                    'ids':
                        sparse_tensor.SparseTensorValue(
                            indices=((0, 0), (1, 0), (1, 1)),
                            values=(0, 2, 1),
                            dense_shape=(2, 2)),
                    'values': ((.5,), (1.,))
                }, (column,),
                sparse_combiner='mean')
            # Disabling the constant folding optimizer here since it changes the
            # error message differently on CPU and GPU.
            config = config_pb2.ConfigProto()
            config.graph_options.rewrite_options.constant_folding = (
                rewriter_config_pb2.RewriterConfig.OFF)
            with _initialized_session(config):
                with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
                    predictions.eval()

    def test_keras_linear_model_mismatched_dense_shape(self):
        """Dense (3, 1) weights are accepted for a (2, 2) id input."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = get_keras_linear_model_predictions({
                'ids':
                    sparse_tensor.SparseTensorValue(
                        indices=((0, 0), (1, 0), (1, 1)),
                        values=(0, 2, 1),
                        dense_shape=(2, 2)),
                'values': ((.5,), (1.,), (.1,))
            }, (column,))
            bias = get_linear_model_bias()
            weight_var = get_linear_model_column_var(column)
            with _initialized_session():
                self.assertAllClose((0.,), bias.eval())
                self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
                self.assertAllClose(((0.,), (0.,)), predictions.eval())
                weight_var.assign(((1.,), (2.,), (3.,))).eval()
                # weight_var[0] * weights[0, 0] = 1 * .5 = .5
                # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
                # = 3*1 + 2*.1 = 3+.2 = 3.2
                self.assertAllClose(((.5,), (3.2,)), predictions.eval())

    def test_linear_model(self):
        """fc.linear_model applies the per-id weights to the ids."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = fc.linear_model({
                'ids': sparse_tensor.SparseTensorValue(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=(0, 2, 1),
                    dense_shape=(2, 2)),
                'values': sparse_tensor.SparseTensorValue(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=(.5, 1., .1),
                    dense_shape=(2, 2))
            }, (column,))
            bias = get_linear_model_bias()
            weight_var = get_linear_model_column_var(column)
            with _initialized_session():
                self.assertAllClose((0.,), bias.eval())
                self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
                self.assertAllClose(((0.,), (0.,)), predictions.eval())
                weight_var.assign(((1.,), (2.,), (3.,))).eval()
                # weight_var[0] * weights[0, 0] = 1 * .5 = .5
                # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
                # = 3*1 + 2*.1 = 3+.2 = 3.2
                self.assertAllClose(((.5,), (3.2,)), predictions.eval())

    def test_linear_model_mismatched_shape(self):
        """More weight entries than id entries must raise at build time."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            with self.assertRaisesRegexp(
                    ValueError, r'Dimensions.*are not compatible'):
                fc.linear_model({
                    'ids': sparse_tensor.SparseTensorValue(
                        indices=((0, 0), (1, 0), (1, 1)),
                        values=(0, 2, 1),
                        dense_shape=(2, 2)),
                    'values': sparse_tensor.SparseTensorValue(
                        indices=((0, 0), (0, 1), (1, 0), (1, 1)),
                        values=(.5, 11., 1., .1),
                        dense_shape=(2, 2))
                }, (column,))

    def test_linear_model_mismatched_dense_values(self):
        """Dense weights with too few rows fail at run time, not build time."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = fc.linear_model(
                {
                    'ids':
                        sparse_tensor.SparseTensorValue(
                            indices=((0, 0), (1, 0), (1, 1)),
                            values=(0, 2, 1),
                            dense_shape=(2, 2)),
                    'values': ((.5,), (1.,))
                }, (column,),
                sparse_combiner='mean')
            # Disabling the constant folding optimizer here since it changes the
            # error message differently on CPU and GPU.
            config = config_pb2.ConfigProto()
            config.graph_options.rewrite_options.constant_folding = (
                rewriter_config_pb2.RewriterConfig.OFF)
            with _initialized_session(config):
                with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'):
                    predictions.eval()

    def test_linear_model_mismatched_dense_shape(self):
        """Dense (3, 1) weights are accepted for a (2, 2) id input."""
        column = fc.weighted_categorical_column(
            categorical_column=fc.categorical_column_with_identity(
                key='ids', num_buckets=3),
            weight_feature_key='values')
        with ops.Graph().as_default():
            predictions = fc.linear_model({
                'ids': sparse_tensor.SparseTensorValue(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=(0, 2, 1),
                    dense_shape=(2, 2)),
                'values': ((.5,), (1.,), (.1,))
            }, (column,))
            bias = get_linear_model_bias()
            weight_var = get_linear_model_column_var(column)
            with _initialized_session():
                self.assertAllClose((0.,), bias.eval())
                self.assertAllClose(((0.,), (0.,), (0.,)), weight_var.eval())
                self.assertAllClose(((0.,), (0.,)), predictions.eval())
                weight_var.assign(((1.,), (2.,), (3.,))).eval()
                # weight_var[0] * weights[0, 0] = 1 * .5 = .5
                # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1]
                # = 3*1 + 2*.1 = 3+.2 = 3.2
                self.assertAllClose(((.5,), (3.2,)), predictions.eval())
# TODO(ptucker): Add test with embedding of weighted categorical.
# Standard TensorFlow test-module entry point.
if __name__ == '__main__':
    test.main()
| apache-2.0 |
analurandis/Tur | backend/venv/Lib/site-packages/setuptools/command/bdist_rpm.py | 1049 | 1508 | import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
    """
    Override the default bdist_rpm behavior to do the following:

    1. Run egg_info to ensure the name and version are properly calculated.
    2. Always run 'install' using --single-version-externally-managed to
       disable eggs in RPM distributions.
    3. Replace dash with underscore in the version numbers for better RPM
       compatibility.
    """

    def run(self):
        # ensure distro name is up-to-date
        self.run_command('egg_info')
        orig.bdist_rpm.run(self)

    def _make_spec_file(self):
        """Return the distutils spec file, rewritten for RPM compatibility."""
        version = self.distribution.get_version()
        # RPM does not allow dashes in the Version tag, so substitute
        # underscores but keep the original string as %{unmangled_version}.
        rpmversion = version.replace('-', '_')
        spec = orig.bdist_rpm._make_spec_file(self)
        # The '%define version' line distutils emits, before/after mangling.
        line23 = '%define version ' + version
        line24 = '%define version ' + rpmversion
        spec = [
            line.replace(
                "Source0: %{name}-%{version}.tar",
                "Source0: %{name}-%{unmangled_version}.tar"
            ).replace(
                "setup.py install ",
                "setup.py install --single-version-externally-managed "
            ).replace(
                "%setup",
                "%setup -n %{name}-%{unmangled_version}"
            ).replace(line23, line24)
            for line in spec
        ]
        # Define %{unmangled_version} immediately after the version define so
        # the Source0/%setup substitutions above can resolve it.
        insert_loc = spec.index(line24) + 1
        unmangled_version = "%define unmangled_version " + version
        spec.insert(insert_loc, unmangled_version)
        return spec
| mit |
dwettstein/pattern-recognition-2016 | mlp/model_selection/exceptions.py | 35 | 4329 | """
The :mod:`sklearn.exceptions` module includes all custom warnings and error
classes used across scikit-learn.
"""
__all__ = ['NotFittedError',
'ChangedBehaviorWarning',
'ConvergenceWarning',
'DataConversionWarning',
'DataDimensionalityWarning',
'EfficiencyWarning',
'FitFailedWarning',
'NonBLASDotWarning',
'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
    """Exception raised when an estimator is used before it has been fitted.

    Deriving from both ``ValueError`` and ``AttributeError`` keeps exception
    handling backward compatible with earlier scikit-learn releases, which
    raised either of those types in this situation.

    Examples
    --------
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import NotFittedError
    >>> try:
    ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
    ... except NotFittedError as e:
    ...     print(repr(e))
    ...     # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    NotFittedError('This LinearSVC instance is not fitted yet',)
    """
class ChangedBehaviorWarning(UserWarning):
    """Warning raised to inform the user that some behavior has changed."""
class ConvergenceWarning(UserWarning):
    """Warning raised when an iterative procedure fails to converge."""
class DataConversionWarning(UserWarning):
    """Warning raised when input data is implicitly converted.

    Emitted when input must be converted or reinterpreted in a way the
    caller may not expect, for example when:

    - an integer array is passed to a function expecting float input and
      is converted,
    - a non-copying operation is requested but a copy is required to meet
      the implementation's data-type expectations,
    - an input's shape can be interpreted ambiguously.
    """
class DataDimensionalityWarning(UserWarning):
    """Warning about potential issues with data dimensionality.

    In random projection, for instance, this is raised when the number of
    components (the dimensionality of the target projection space) exceeds
    the number of features (the dimensionality of the original source
    space), implying that the projection will not reduce dimensionality.
    """
class EfficiencyWarning(UserWarning):
    """Warning that a computation may not be running efficiently.

    The reason the efficiency is suboptimal may be included in the warning
    message; more specific warnings may subclass this one.
    """
class FitFailedWarning(RuntimeWarning):
    """Warning raised when fitting an estimator fails.

    Used by the meta-estimators GridSearchCV and RandomizedSearchCV and by
    the cross-validation helper cross_val_score to warn that an error
    occurred while fitting the estimator.

    Examples
    --------
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import FitFailedWarning
    >>> import warnings
    >>> warnings.simplefilter('always', FitFailedWarning)
    >>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
    >>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
    >>> with warnings.catch_warnings(record=True) as w:
    ...     try:
    ...         gs.fit(X, y)  # This will raise a ValueError since C is < 0
    ...     except ValueError:
    ...         pass
    ...     print(repr(w[-1].message))
    ...     # doctest: +NORMALIZE_WHITESPACE
    FitFailedWarning("Classifier fit failed. The score on this train-test
    partition for these parameters will be set to 0.000000. Details:
    \\nValueError('Penalty term must be positive; got (C=-2)',)",)
    """
class NonBLASDotWarning(EfficiencyWarning):
    """Warning that a dot operation did not go through BLAS.

    Tells the user that BLAS was not used for the dot product, so its
    efficiency may be affected.
    """
class UndefinedMetricWarning(UserWarning):
    """Warning raised when a requested metric is invalid or undefined."""
| mit |
robinro/ansible-modules-core | web_infrastructure/htpasswd.py | 10 | 9046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requires: [ passlib>=1.6 ]
author: "Ansible Core Team"
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import StrictVersion
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
    """Ensure the parent directory of ``dest`` exists, creating it if needed.

    Fixes two defects in the original:
    - ``dest`` with no directory component made ``os.path.dirname`` return
      ``''``, and ``os.makedirs('')`` raises OSError; skip that case.
    - The exists()/makedirs() pair raced with concurrent creation of the
      same directory; tolerate EEXIST-style failures when the directory
      turns out to exist, but re-raise real errors (e.g. permissions).
    """
    destpath = os.path.dirname(dest)
    if destpath and not os.path.exists(destpath):
        try:
            os.makedirs(destpath)
        except OSError:
            # Another process may have created it between the check and the
            # makedirs call; only swallow the error in that case.
            if not os.path.isdir(destpath):
                raise
def present(dest, username, password, crypt_scheme, create, check_mode):
    """Ensure `username` exists in the htpasswd file `dest` with `password`.

    Returns (msg, changed).
    """
    # Non-Apache schemes need a custom passlib context that still understands
    # the Apache hashes that may already be present in the file.
    if crypt_scheme in apache_hashes:
        context = htpasswd_context
    else:
        context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
    if not os.path.exists(dest):
        if not create:
            raise ValueError('Destination %s does not exist' % dest)
        if check_mode:
            return ("Create %s" % dest, True)
        create_missing_directories(dest)
        # passlib 1.6 renamed the constructor arguments
        # (autoload/default -> new/default_scheme).
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
        # Newer passlib exposes set_password(); fall back to update() otherwise.
        if getattr(ht, 'set_password', None):
            ht.set_password(username, password)
        else:
            ht.update(username, password)
        ht.save()
        return ("Created %s and added %s" % (dest, username), True)
    else:
        if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
            ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
        else:
            ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
        found = None
        # Newer passlib exposes check_password(); fall back to verify().
        if getattr(ht, 'check_password', None):
            found = ht.check_password(username, password)
        else:
            found = ht.verify(username, password)
        if found:
            # Same username with a matching password: nothing to do.
            return ("%s already present" % username, False)
        else:
            if not check_mode:
                if getattr(ht, 'set_password', None):
                    ht.set_password(username, password)
                else:
                    ht.update(username, password)
                ht.save()
            return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
    """Ensure *username* is absent from the htpasswd file *dest*.

    Returns (msg, changed).
    """
    # passlib 1.6 requires new=False to load an existing file.
    if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
        ht = HtpasswdFile(dest, new=False)
    else:
        ht = HtpasswdFile(dest)
    if username not in ht.users():
        return ("%s not present" % username, False)
    else:
        # In check mode report the removal without writing the file.
        if not check_mode:
            ht.delete(username)
            ht.save()
        return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
    """Apply owner/mode/SELinux file arguments to the destination file.

    Returns the (possibly extended) message and changed flag; callers must
    use the return value, the inputs are not mutated in place.
    """
    file_args = module.load_file_common_arguments(module.params)
    if module.set_fs_attributes_if_different(file_args, False):
        if changed:
            message += " and "
        changed = True
        message += "ownership, perms or SE linux context changed"
    return message, changed
def main():
    """Module entry point: add/update or remove an htpasswd entry."""
    arg_spec = dict(
        path=dict(required=True, aliases=["dest", "destfile"]),
        name=dict(required=True, aliases=["username"]),
        password=dict(required=False, default=None, no_log=True),
        crypt_scheme=dict(required=False, default="apr_md5_crypt"),
        state=dict(required=False, default="present"),
        create=dict(type='bool', default='yes'),
    )
    module = AnsibleModule(argument_spec=arg_spec,
                           add_file_common_args=True,
                           supports_check_mode=True)

    path = module.params['path']
    username = module.params['name']
    password = module.params['password']
    crypt_scheme = module.params['crypt_scheme']
    state = module.params['state']
    create = module.params['create']
    check_mode = module.check_mode

    if not passlib_installed:
        module.fail_json(msg="This module requires the passlib Python library")

    # Check file for blank lines in effort to avoid "need more than 1 value
    # to unpack" error from passlib when it parses the file.
    try:
        f = open(path, "r")
    except IOError:
        # No preexisting file to remove blank lines from
        f = None
    else:
        try:
            lines = f.readlines()
        finally:
            f.close()

        # Only rewrite the file if it actually contains blank lines.
        if any(not line.strip() for line in lines):
            # In check mode, redirect the rewrite to a temporary file so the
            # real destination is left untouched.
            if check_mode:
                temp = tempfile.NamedTemporaryFile()
                path = temp.name
            f = open(path, "w")
            try:
                for line in lines:
                    if line.strip():
                        f.write(line)
            finally:
                f.close()

    try:
        if state == 'present':
            (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
        elif state == 'absent':
            if not os.path.exists(path):
                module.exit_json(msg="%s not present" % username,
                                 warnings="%s does not exist" % path, changed=False)
            (msg, changed) = absent(path, username, check_mode)
        else:
            module.fail_json(msg="Invalid state: %s" % state)

        # Bug fix: capture the updated message/changed flag instead of
        # discarding check_file_attrs()'s return value, so attribute-only
        # changes (owner/mode/SELinux) are reported to the caller.
        (msg, changed) = check_file_attrs(module, changed, msg)
        module.exit_json(msg=msg, changed=changed)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/deps/v8/tools/gen-inlining-tests.py | 4 | 15748 | #!/usr/bin/env python3
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import textwrap
import sys
SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
# Generates 2 files. Found by trial and error.
SHARD_SIZE = 97
PREAMBLE = """
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --no-always-opt
// This test file was generated by tools/gen-inlining-tests.py .
// Global variables
var deopt = undefined; // either true or false
var counter = 0;
function resetState() {
counter = 0;
}
function warmUp(f) {
try {
f();
} catch (ex) {
// ok
}
try {
f();
} catch (ex) {
// ok
}
}
function resetOptAndAssertResultEquals(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
assertEquals(expected, f());
}
function resetOptAndAssertThrowsWith(expected, f) {
warmUp(f);
resetState();
// %DebugPrint(f);
eval("'dont optimize this function itself please, but do optimize f'");
%OptimizeFunctionOnNextCall(f);
try {
var result = f();
fail("resetOptAndAssertThrowsWith",
"exception: " + expected,
"result: " + result);
} catch (ex) {
assertEquals(expected, ex);
}
}
function increaseAndReturn15() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
function increaseAndThrow42() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
function increaseAndReturn15_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
return 15;
}
%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
function increaseAndThrow42_noopt_inner() {
if (deopt) %DeoptimizeFunction(f);
counter++;
throw 42;
}
%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
// Alternative 1
function returnOrThrow(doReturn) {
if (doReturn) {
return increaseAndReturn15();
} else {
return increaseAndThrow42();
}
}
// Alternative 2
function increaseAndReturn15_calls_noopt() {
return increaseAndReturn15_noopt_inner();
}
function increaseAndThrow42_calls_noopt() {
return increaseAndThrow42_noopt_inner();
}
// Alternative 3.
// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
// as the other one.
function invertFunctionCall(f) {
var result;
try {
result = f();
} catch (ex) {
return ex - 27;
}
throw result + 27;
}
// Alternative 4: constructor
function increaseAndStore15Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 15;
}
function increaseAndThrow42Constructor() {
if (deopt) %DeoptimizeFunction(f);
++counter;
this.x = 42;
throw this.x;
}
// Alternative 5: property
var magic = {};
Object.defineProperty(magic, 'prop', {
get: function () {
if (deopt) %DeoptimizeFunction(f);
return 15 + 0 * ++counter;
},
set: function(x) {
// argument should be 37
if (deopt) %DeoptimizeFunction(f);
counter -= 36 - x; // increments counter
throw 42;
}
})
// Generate type feedback.
assertEquals(15, increaseAndReturn15_calls_noopt());
assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
assertEquals(15, (new increaseAndStore15Constructor()).x);
assertThrowsEquals(function() {
return (new increaseAndThrow42Constructor()).x;
},
42);
function runThisShard() {
""".strip()
def booltuples(n):
    """Yield every n-tuple of booleans in lexicographic order, False first.

    booltuples(2) yields 4 tuples: (False, False), (False, True),
    (True, False), (True, True).  For n <= 0 the single empty tuple is
    yielded.
    """
    assert isinstance(n, int)
    if n <= 0:
        yield ()
        return
    for prefix in booltuples(n - 1):
        for last in (False, True):
            yield prefix + (last,)
def fnname(flags):
    """Build a test-function name: 'f_' plus one flag letter per set flag,
    '_' for each unset flag."""
    assert len(FLAGLETTERS) == len(flags)
    letters = []
    for letter, enabled in zip(FLAGLETTERS, flags):
        letters.append(letter if enabled else '_')
    return "f_" + ''.join(letters)
NUM_TESTS_PRINTED = 0
NUM_TESTS_IN_SHARD = 0
def printtest(flags):
    """Print a test case. Takes a couple of boolean flags, on which the
    printed Javascript code depends.

    While emitting the JS source, this also simulates its execution to
    compute the expected result ({result}) and the expected side-effect
    count ({counter}) that the generated assertions check.
    """
    assert all(isinstance(flag, bool) for flag in flags)
    # The alternative flags are in reverse order so that if we take all possible
    # tuples, ordered lexicographically from false to true, we get first the
    # default, then alternative 1, then 2, etc.
    (
        alternativeFn5,  # use alternative #5 for returning/throwing:
        # return/throw using property
        alternativeFn4,  # use alternative #4 for returning/throwing:
        # return/throw using constructor
        alternativeFn3,  # use alternative #3 for returning/throwing:
        # return/throw indirectly, based on function argument
        alternativeFn2,  # use alternative #2 for returning/throwing:
        # return/throw indirectly in unoptimized code,
        # no branching
        alternativeFn1,  # use alternative #1 for returning/throwing:
        # return/throw indirectly, based on boolean arg
        tryThrows,  # in try block, call throwing function
        tryReturns,  # in try block, call returning function
        tryFirstReturns,  # in try block, returning goes before throwing
        tryResultToLocal,  # in try block, result goes to local variable
        doCatch,  # include catch block
        catchReturns,  # in catch block, return
        catchWithLocal,  # in catch block, modify or return the local variable
        catchThrows,  # in catch block, throw
        doFinally,  # include finally block
        finallyReturns,  # in finally block, return local variable
        finallyThrows,  # in finally block, throw
        endReturnLocal,  # at very end, return variable local
        deopt,  # deopt inside inlined function
    ) = flags
    # BASIC RULES
    # Only one alternative can be applied at any time.
    if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
            + alternativeFn5 > 1):
        return
    # In try, return or throw, or both.
    if not (tryReturns or tryThrows): return
    # Either doCatch or doFinally.
    if not doCatch and not doFinally: return
    # Catch flags only make sense when catching
    if not doCatch and (catchReturns or catchWithLocal or catchThrows):
        return
    # Finally flags only make sense when finallying
    if not doFinally and (finallyReturns or finallyThrows):
        return
    # tryFirstReturns is only relevant when both tryReturns and tryThrows are
    # true.
    if tryFirstReturns and not (tryReturns and tryThrows): return
    # From the try and finally block, we can return or throw, but not both.
    if catchReturns and catchThrows: return
    if finallyReturns and finallyThrows: return
    # If at the end we return the local, we need to have touched it.
    if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
    # PRUNING
    anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
                          alternativeFn4, alternativeFn5])
    specificAlternative = any([alternativeFn2, alternativeFn3])
    rareAlternative = not specificAlternative
    # If try returns and throws, then don't catchWithLocal, endReturnLocal, or
    # deopt, or do any alternative.
    if (tryReturns and tryThrows and
            (catchWithLocal or endReturnLocal or deopt or anyAlternative)):
        return
    # We don't do any alternative if we do a finally.
    if doFinally and anyAlternative: return
    # We only use the local variable if we do alternative #2 or #3.
    if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
            not specificAlternative):
        return
    # We don't need to test deopting into a finally.
    if doFinally and deopt: return
    # We're only interested in alternative #2 if we have endReturnLocal, no
    # catchReturns, and no catchThrows, and deopt.
    if (alternativeFn2 and
            (not endReturnLocal or catchReturns or catchThrows or not deopt)):
        return
    # Flag check succeeded.
    trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
    flagsMsgLine = " // Variant flags: [{}]".format(', '.join(trueFlagNames))
    write(textwrap.fill(flagsMsgLine, subsequent_indent=' // '))
    write("")
    # Pick the JS snippets used inside the try body, per the alternative.
    if not anyAlternative:
        fragments = {
            'increaseAndReturn15': 'increaseAndReturn15()',
            'increaseAndThrow42': 'increaseAndThrow42()',
        }
    elif alternativeFn1:
        fragments = {
            'increaseAndReturn15': 'returnOrThrow(true)',
            'increaseAndThrow42': 'returnOrThrow(false)',
        }
    elif alternativeFn2:
        fragments = {
            'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
            'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
        }
    elif alternativeFn3:
        fragments = {
            'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
            'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
        }
    elif alternativeFn4:
        fragments = {
            'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
            'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
        }
    else:
        assert alternativeFn5
        fragments = {
            'increaseAndReturn15': 'magic.prop /* returns 15 */',
            'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
        }
    # As we print code, we also maintain what the result should be. Variable
    # {result} can be one of three things:
    #
    # - None, indicating returning JS null
    # - ("return", n) with n an integer
    # - ("throw", n), with n an integer
    result = None
    # We also maintain what the counter should be at the end.
    # The counter is reset just before f is called.
    counter = 0
    write( " f = function {} () {{".format(fnname(flags)))
    write( " var local = 888;")
    write( " deopt = {};".format("true" if deopt else "false"))
    local = 888
    write( " try {")
    write( " counter++;")
    counter += 1
    resultTo = "local +=" if tryResultToLocal else "return"
    if tryReturns and not (tryThrows and not tryFirstReturns):
        write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
        if result == None:
            counter += 1
            if tryResultToLocal:
                local += 19
            else:
                result = ("return", 19)
    if tryThrows:
        write( " {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
        if result == None:
            counter += 1
            result = ("throw", 42)
    if tryReturns and tryThrows and not tryFirstReturns:
        write( " {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
        if result == None:
            counter += 1
            if tryResultToLocal:
                local += 19
            else:
                result = ("return", 19)
    write( " counter++;")
    if result == None:
        counter += 1
    if doCatch:
        write( " } catch (ex) {")
        write( " counter++;")
        # The catch body only runs when the try body threw.
        if isinstance(result, tuple) and result[0] == 'throw':
            counter += 1
        if catchThrows:
            write(" throw 2 + ex;")
            if isinstance(result, tuple) and result[0] == "throw":
                result = ('throw', 2 + result[1])
        elif catchReturns and catchWithLocal:
            write(" return 2 + local;")
            if isinstance(result, tuple) and result[0] == "throw":
                result = ('return', 2 + local)
        elif catchReturns and not catchWithLocal:
            write(" return 2 + ex;");
            if isinstance(result, tuple) and result[0] == "throw":
                result = ('return', 2 + result[1])
        elif catchWithLocal:
            write(" local += ex;");
            if isinstance(result, tuple) and result[0] == "throw":
                local += result[1]
                result = None
                counter += 1
        else:
            if isinstance(result, tuple) and result[0] == "throw":
                result = None
                counter += 1
        write( " counter++;")
    if doFinally:
        write( " } finally {")
        write( " counter++;")
        counter += 1
        if finallyThrows:
            write(" throw 25;")
            result = ('throw', 25)
        elif finallyReturns:
            write(" return 3 + local;")
            result = ('return', 3 + local)
        elif not finallyReturns and not finallyThrows:
            write(" local += 2;")
            local += 2
            counter += 1
        else: assert False # unreachable
        write( " counter++;")
    write( " }")
    write( " counter++;")
    if result == None:
        counter += 1
    if endReturnLocal:
        write( " return 5 + local;")
        if result == None:
            result = ('return', 5 + local)
    write( " }")
    # Emit the assertion matching the simulated outcome.
    if result == None:
        write( " resetOptAndAssertResultEquals(undefined, f);")
    else:
        tag, value = result
        if tag == "return":
            write( " resetOptAndAssertResultEquals({}, f);".format(value))
        else:
            assert tag == "throw"
            write( " resetOptAndAssertThrowsWith({}, f);".format(value))
    write( " assertEquals({}, counter);".format(counter))
    write( "")
    global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
    NUM_TESTS_PRINTED += 1
    NUM_TESTS_IN_SHARD += 1
FILE = None # to be initialised to an open file
SHARD_NUM = 1
def write(*args):
    # Thin wrapper around print() that targets the current output file
    # (a shard file in 'shard' mode, sys.stdout otherwise).
    return print(*args, file=FILE)
def rotateshard():
    """Start a new shard file when none is open or the current one is full.

    No-op unless MODE == 'shard'.
    """
    global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
    if MODE != 'shard':
        return
    # Keep writing to the current shard while it has room.
    if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
        return
    if FILE != None:
        finishshard()
    assert FILE == None
    FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
    write_shard_header()
    NUM_TESTS_IN_SHARD = 0
def finishshard():
    """Write the shard footer; in shard mode also close the file and
    advance SHARD_NUM."""
    global FILE, SHARD_NUM, MODE
    assert FILE
    write_shard_footer()
    # In stdout mode FILE is sys.stdout, which must stay open.
    if MODE == 'shard':
        print("Wrote shard {}.".format(SHARD_NUM))
        FILE.close()
        FILE = None
        SHARD_NUM += 1
def write_shard_header():
    """Emit the per-shard banner and the shared JS preamble."""
    if MODE == 'shard':
        write("// Shard {}.".format(SHARD_NUM))
        write("")
    write(PREAMBLE)
    write("")
def write_shard_footer():
    """Close runThisShard(), emit test-count comments, and invoke it."""
    write("}")
    write("%NeverOptimizeFunction(runThisShard);")
    write("")
    write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
    write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
    write("")
    write("runThisShard();")
FLAGLETTERS="54321trflcrltfrtld"
flagtuple = namedtuple('flagtuple', (
"alternativeFn5",
"alternativeFn4",
"alternativeFn3",
"alternativeFn2",
"alternativeFn1",
"tryThrows",
"tryReturns",
"tryFirstReturns",
"tryResultToLocal",
"doCatch",
"catchReturns",
"catchWithLocal",
"catchThrows",
"doFinally",
"finallyReturns",
"finallyThrows",
"endReturnLocal",
"deopt"
))
emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
f1 = emptyflags._replace(tryReturns=True, doCatch=True)
# You can test function printtest with f1.
allFlagCombinations = [
flagtuple(*bools)
for bools in booltuples(len(flagtuple._fields))
]
if __name__ == '__main__':
    # NOTE(review): 'global' at module level is a no-op statement; harmless
    # but redundant.
    global MODE
    if sys.argv[1:] == []:
        # No arguments: stream every shard to stdout as one file.
        MODE = 'stdout'
        print("// Printing all shards together to stdout.")
        print("")
        write_shard_header()
        FILE = sys.stdout
    elif sys.argv[1:] == ['--shard-and-overwrite']:
        MODE = 'shard'
    else:
        print("Usage:")
        print("")
        print(" python {}".format(sys.argv[0]))
        print(" print all tests to standard output")
        print(" python {} --shard-and-overwrite".format(sys.argv[0]))
        print(" print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
        print("")
        print(sys.argv[1:])
        print("")
        sys.exit(1)
    rotateshard()
    # Emit one test per surviving flag combination, rotating shards as
    # they fill up.
    for flags in allFlagCombinations:
        printtest(flags)
        rotateshard()
    finishshard()
    if MODE == 'shard':
        print("Total: {} tests.".format(NUM_TESTS_PRINTED))
| mit |
redhat-openstack/python-openstackclient | openstackclient/volume/v1/volume.py | 1 | 13346 | # Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume v1 Volume action implementations"""
import argparse
import six
from openstackclient.common import command
from openstackclient.common import parseractions
from openstackclient.common import utils
from openstackclient.i18n import _
class CreateVolume(command.ShowOne):
    """Create new volume"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume create`."""
        parser = super(CreateVolume, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_('Volume name'),
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            required=True,
            type=int,
            help=_('Volume size in GB'),
        )
        parser.add_argument(
            '--type',
            metavar='<volume-type>',
            help=_("Set the type of volume"),
        )
        parser.add_argument(
            '--image',
            metavar='<image>',
            help=_('Use <image> as source of volume (name or ID)'),
        )
        # --snapshot-id is a hidden, deprecated alias for --snapshot.
        snapshot_group = parser.add_mutually_exclusive_group()
        snapshot_group.add_argument(
            '--snapshot',
            metavar='<snapshot>',
            help=_('Use <snapshot> as source of volume (name or ID)'),
        )
        snapshot_group.add_argument(
            '--snapshot-id',
            metavar='<snapshot-id>',
            help=argparse.SUPPRESS,
        )
        parser.add_argument(
            '--source',
            metavar='<volume>',
            help=_('Volume to clone (name or ID)'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('Volume description'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify an alternate user (name or ID)'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Specify an alternate project (name or ID)'),
        )
        parser.add_argument(
            '--availability-zone',
            metavar='<availability-zone>',
            help=_('Create volume in <availability-zone>'),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Set a property on this volume '
                   '(repeat option to set multiple properties)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve names to IDs, create the volume, and return its fields."""
        identity_client = self.app.client_manager.identity
        image_client = self.app.client_manager.image
        volume_client = self.app.client_manager.volume
        source_volume = None
        if parsed_args.source:
            source_volume = utils.find_resource(
                volume_client.volumes,
                parsed_args.source,
            ).id
        project = None
        if parsed_args.project:
            project = utils.find_resource(
                identity_client.tenants,
                parsed_args.project,
            ).id
        user = None
        if parsed_args.user:
            user = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            ).id
        image = None
        if parsed_args.image:
            image = utils.find_resource(
                image_client.images,
                parsed_args.image,
            ).id
        snapshot = parsed_args.snapshot or parsed_args.snapshot_id
        # NOTE(review): positional argument order must match the
        # cinderclient v1 volumes.create() signature — do not reorder.
        volume = volume_client.volumes.create(
            parsed_args.size,
            snapshot,
            source_volume,
            parsed_args.name,
            parsed_args.description,
            parsed_args.type,
            user,
            project,
            parsed_args.availability_zone,
            parsed_args.property,
            image,
        )
        # Map 'metadata' column to 'properties'
        volume._info.update(
            {
                'properties': utils.format_dict(volume._info.pop('metadata')),
                'type': volume._info.pop('volume_type'),
            },
        )
        return zip(*sorted(six.iteritems(volume._info)))
class DeleteVolume(command.Command):
    """Delete volume(s)"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume delete`."""
        parser = super(DeleteVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volumes',
            metavar='<volume>',
            nargs="+",
            help=_('Volume(s) to delete (name or ID)'),
        )
        parser.add_argument(
            '--force',
            dest='force',
            action='store_true',
            default=False,
            help=_('Attempt forced removal of volume(s), regardless of state '
                   '(defaults to False)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve each volume by name or ID and delete it."""
        volume_client = self.app.client_manager.volume
        # Choose the deletion call once; --force is constant for the run.
        if parsed_args.force:
            delete_call = volume_client.volumes.force_delete
        else:
            delete_call = volume_client.volumes.delete
        for name_or_id in parsed_args.volumes:
            found = utils.find_resource(volume_client.volumes, name_or_id)
            delete_call(found.id)
class ListVolume(command.Lister):
    """List volumes"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume list`."""
        parser = super(ListVolume, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('Filter results by volume name'),
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            help=_('Filter results by status'),
        )
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help=_('Include all projects (admin only)'),
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        return parser

    def take_action(self, parsed_args):
        """List volumes, resolving attached server IDs to names when possible."""
        volume_client = self.app.client_manager.volume
        compute_client = self.app.client_manager.compute

        # Closure over server_cache, which is populated below before this
        # formatter is ever invoked (it only runs while rendering rows).
        def _format_attach(attachments):
            """Return a formatted string of a volume's attached instances

            :param attachments: a volume.attachments field
            :rtype: a string of formatted instances
            """
            msg = ''
            for attachment in attachments:
                server = attachment['server_id']
                if server in server_cache.keys():
                    server = server_cache[server].name
                device = attachment['device']
                msg += 'Attached to %s on %s ' % (server, device)
            return msg

        if parsed_args.long:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Volume Type',
                'Bootable',
                'Attachments',
                'Metadata',
            )
            column_headers = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Type',
                'Bootable',
                'Attached to',
                'Properties',
            )
        else:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Attachments',
            )
            column_headers = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Attached to',
            )

        # Cache the server list
        server_cache = {}
        try:
            for s in compute_client.servers.list():
                server_cache[s.id] = s
        except Exception:
            # Just forget it if there's any trouble
            pass

        search_opts = {
            'all_tenants': parsed_args.all_projects,
            'display_name': parsed_args.name,
            'status': parsed_args.status,
        }

        data = volume_client.volumes.list(search_opts=search_opts)
        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters={'Metadata': utils.format_dict,
                                'Attachments': _format_attach},
                ) for s in data))
class SetVolume(command.Command):
    """Set volume properties"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume set`."""
        parser = super(SetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New volume name'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New volume description'),
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            type=int,
            help=_('Extend volume size in GB'),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_('Set a property on this volume '
                   '(repeat option to set multiple properties)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Apply the requested modifications; invalid extends only log an
        error and abort without raising."""
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)

        if parsed_args.size:
            # Extending is only allowed on detached, 'available' volumes and
            # must strictly grow the volume.
            if volume.status != 'available':
                self.app.log.error(_("Volume is in %s state, it must be "
                                     "available before size can be extended") %
                                   volume.status)
                return
            if parsed_args.size <= volume.size:
                self.app.log.error(_("New size must be greater than %s GB") %
                                   volume.size)
                return
            volume_client.volumes.extend(volume.id, parsed_args.size)

        if parsed_args.property:
            volume_client.volumes.set_metadata(volume.id, parsed_args.property)

        # Name/description changes go through a single update() call.
        kwargs = {}
        if parsed_args.name:
            kwargs['display_name'] = parsed_args.name
        if parsed_args.description:
            kwargs['display_description'] = parsed_args.description
        if kwargs:
            volume_client.volumes.update(volume.id, **kwargs)
class ShowVolume(command.ShowOne):
    """Show volume details"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume show`."""
        parser = super(ShowVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to display (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the volume and return its fields, renaming a few keys to
        the names this CLI displays."""
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)

        # Rename v1 API keys: 'metadata' -> 'properties',
        # 'volume_type' -> 'type'.
        info = volume._info
        info['properties'] = utils.format_dict(info.pop('metadata'))
        info['type'] = info.pop('volume_type')

        tenant_attr = 'os-vol-tenant-attr:tenant_id'
        if tenant_attr in info:
            info['project_id'] = info.pop(tenant_attr)

        return zip(*sorted(six.iteritems(info)))
class UnsetVolume(command.Command):
    """Unset volume properties"""

    def get_parser(self, prog_name):
        """Build the argument parser for `volume unset`."""
        parser = super(UnsetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            default=[],
            help=_('Remove a property from volume '
                   '(repeat option to remove multiple properties)'),
            required=True,
        )
        return parser

    def take_action(self, parsed_args):
        """Delete the named metadata keys from the volume."""
        volume_client = self.app.client_manager.volume
        target = utils.find_resource(volume_client.volumes, parsed_args.volume)
        keys_to_remove = parsed_args.property
        if keys_to_remove:
            volume_client.volumes.delete_metadata(target.id, keys_to_remove)
| apache-2.0 |
keedio/hue | desktop/core/ext-py/Django-1.6.10/tests/many_to_one_null/tests.py | 111 | 4478 | from __future__ import absolute_import
from django.test import TestCase
from .models import Reporter, Article
class ManyToOneNullTests(TestCase):
    """Exercise nullable ForeignKey behaviour on Article.reporter."""

    def setUp(self):
        # Create a Reporter.
        self.r = Reporter(name='John Smith')
        self.r.save()
        # Create an Article.
        self.a = Article(headline="First", reporter=self.r)
        self.a.save()
        # Create an Article via the Reporter object.
        self.a2 = self.r.article_set.create(headline="Second")
        # Create an Article with no Reporter by passing "reporter=None".
        self.a3 = Article(headline="Third", reporter=None)
        self.a3.save()
        # Create another article and reporter
        self.r2 = Reporter(name='Paul Jones')
        self.r2.save()
        self.a4 = self.r2.article_set.create(headline='Fourth')

    def test_get_related(self):
        self.assertEqual(self.a.reporter.id, self.r.id)
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)

    def test_created_via_related_set(self):
        # article_set.create() wires the FK back to the reporter.
        self.assertEqual(self.a2.reporter.id, self.r.id)

    def test_related_set(self):
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='Fir'),
                                 ['<Article: First>'])
        self.assertEqual(self.r.article_set.count(), 2)

    def test_created_without_related(self):
        self.assertEqual(self.a3.reporter, None)
        # Need to reget a3 to refresh the cache
        a3 = Article.objects.get(pk=self.a3.pk)
        self.assertRaises(AttributeError, getattr, a3.reporter, 'id')
        # Accessing an article's 'reporter' attribute returns None
        # if the reporter is set to None.
        self.assertEqual(a3.reporter, None)
        # To retrieve the articles with no reporters set, use "reporter__isnull=True".
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: Third>'])
        # We can achieve the same thing by filtering for the case where the
        # reporter is None.
        self.assertQuerysetEqual(Article.objects.filter(reporter=None),
                                 ['<Article: Third>'])
        # Set the reporter for the Third article
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.r.article_set.add(a3)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>', '<Article: Third>'])
        # Remove an article from the set, and check that it was removed.
        self.r.article_set.remove(a3)
        self.assertQuerysetEqual(self.r.article_set.all(),
                                 ['<Article: First>', '<Article: Second>'])
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: Third>'])

    def test_remove_from_wrong_set(self):
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])
        # Try to remove a4 from a set it does not belong to
        self.assertRaises(Reporter.DoesNotExist, self.r.article_set.remove, self.a4)
        self.assertQuerysetEqual(self.r2.article_set.all(), ['<Article: Fourth>'])

    def test_assign_clear_related_set(self):
        # Use descriptor assignment to allocate ForeignKey. Null is legal, so
        # existing members of set that are not in the assignment set are set null
        self.r2.article_set = [self.a2, self.a3]
        self.assertQuerysetEqual(self.r2.article_set.all(),
                                 ['<Article: Second>', '<Article: Third>'])
        # Clear the rest of the set
        self.r.article_set.clear()
        self.assertQuerysetEqual(self.r.article_set.all(), [])
        self.assertQuerysetEqual(Article.objects.filter(reporter__isnull=True),
                                 ['<Article: First>', '<Article: Fourth>'])

    def test_clear_efficiency(self):
        r = Reporter.objects.create()
        for _ in range(3):
            r.article_set.create()
        # clear() must null out all FKs in a single UPDATE query.
        with self.assertNumQueries(1):
            r.article_set.clear()
        self.assertEqual(r.article_set.count(), 0)
| apache-2.0 |
heke123/chromium-crosswalk | chrome/common/extensions/docs/server2/mock_file_system.py | 53 | 4720 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from file_system import FileSystem, FileNotFoundError
from future import Future
from test_file_system import _List, _StatTracker, TestFileSystem
from path_util import IsDirectory
class MockFileSystem(FileSystem):
  '''Wraps FileSystems to add a selection of mock behaviour:
  - asserting how often Stat/Read calls are being made to it.
  - primitive changes/versioning via applying object "diffs", mapping paths to
    new content (similar to how TestFileSystem works).
  Note: this is Python 2 code (iterkeys/iteritems).
  '''

  def __init__(self, file_system):
    # The real FileSystem that Read/Stat calls are delegated to.
    self._file_system = file_system
    # Updates are stored as TestFileSystems because it already implements a
    # bunch of logic to interpret paths into dictionaries.
    self._updates = []
    self._stat_tracker = _StatTracker()
    # Call counters asserted on (and zeroed) by CheckAndReset() in tests.
    self._read_count = 0
    self._read_resolve_count = 0
    self._stat_count = 0
    # Optional version override; see SetVersion()/GetVersion().
    self._version = None

  @staticmethod
  def Create(file_system, updates):
    '''Convenience factory: wraps |file_system| and applies each entry of
    |updates| in order.
    '''
    mock_file_system = MockFileSystem(file_system)
    for update in updates:
      mock_file_system.Update(update)
    return mock_file_system

  #
  # FileSystem implementation.
  #

  def Read(self, paths, skip_not_found=False):
    '''Reads |paths| from |_file_system|, then applies the most recent update
    from |_updates|, if any.
    '''
    self._read_count += 1
    def next(result):
      # Runs when the underlying Read future resolves; counted separately
      # from |_read_count| so tests can distinguish issuing a Read from
      # actually resolving it.
      self._read_resolve_count += 1
      for path in result.iterkeys():
        update = self._GetMostRecentUpdate(path)
        if update is not None:
          result[path] = update
      return result
    return self._file_system.Read(paths,
                                  skip_not_found=skip_not_found).Then(next)

  def Refresh(self):
    # Pass-through; refreshes are neither counted nor mocked.
    return self._file_system.Refresh()

  def _GetMostRecentUpdate(self, path):
    '''Returns the latest update for the file at |path|, or None if |path|
    has never been updated.
    '''
    # Walk the updates newest-first; the first one that knows |path| wins.
    for update in reversed(self._updates):
      try:
        return update.ReadSingle(path).Get()
      except FileNotFoundError:
        pass
    return None

  def Stat(self, path):
    '''Stats |path| on the wrapped file system, then bumps the version (and
    any child versions) by the number of updates that touched each path.
    '''
    self._stat_count += 1

    # This only supports numeric stat values since we need to add to it. In
    # reality the logic here could just be to randomly mutate the stat values
    # every time there's an Update but that's less meaningful for testing.
    def stradd(a, b):
      # Stat versions are strings; add |b| numerically and re-stringify.
      return str(int(a) + b)

    stat = self._file_system.Stat(path)
    stat.version = stradd(stat.version, self._stat_tracker.GetVersion(path))
    if stat.child_versions:
      for child_path, child_version in stat.child_versions.iteritems():
        stat.child_versions[child_path] = stradd(
            stat.child_versions[child_path],
            self._stat_tracker.GetVersion(posixpath.join(path, child_path)))
    return stat

  def GetCommitID(self):
    # The tracked version of the root path doubles as a commit ID here.
    return Future(value=str(self._stat_tracker.GetVersion('')))

  def GetPreviousCommitID(self):
    return Future(value=str(self._stat_tracker.GetVersion('') - 1))

  def GetIdentity(self):
    return self._file_system.GetIdentity()

  def GetVersion(self):
    return self._version

  def __str__(self):
    return repr(self)

  def __repr__(self):
    return 'MockFileSystem(read_count=%s, stat_count=%s, updates=%s)' % (
        self._read_count, self._stat_count, len(self._updates))

  #
  # Testing methods.
  #

  def GetStatCount(self):
    return self._stat_count

  def CheckAndReset(self, stat_count=0, read_count=0, read_resolve_count=0):
    '''Returns a tuple (success, error). Use in tests like:
    self.assertTrue(*object_store.CheckAndReset(...))
    '''
    errors = []
    for desc, expected, actual in (
        ('read_count', read_count, self._read_count),
        ('read_resolve_count', read_resolve_count, self._read_resolve_count),
        ('stat_count', stat_count, self._stat_count)):
      if actual != expected:
        errors.append('%s: expected %s got %s' % (desc, expected, actual))
    # try/finally so the counters are zeroed whether or not the check passed.
    try:
      return (len(errors) == 0, ', '.join(errors))
    finally:
      self.Reset()

  def Reset(self):
    # Zero all call counters (does NOT discard updates or stat versions).
    self._read_count = 0
    self._read_resolve_count = 0
    self._stat_count = 0

  def Update(self, update):
    '''Applies the path->content mapping |update| as the newest diff.'''
    self._updates.append(TestFileSystem(update))
    for path in _List(update).iterkeys():
      # Any files (not directories) which changed are now at the version
      # derived from |_updates|.
      if not IsDirectory(path):
        self._stat_tracker.SetVersion(path, len(self._updates))

  def SetVersion(self, version):
    '''Override the reported FileSystem version (default None) for testing.'''
    self._version = version
| bsd-3-clause |
pnelson/flask-informal | setup.py | 1 | 1035 | """
Flask-Informal
--------------
Adds form abstraction and helpers to your Flask application.
"""
from setuptools import setup
# Distribution metadata for the Flask-Informal extension.  The module
# docstring above doubles as the long description.
setup(
    name="Flask-Informal",
    version="0.1.0",
    url="https://github.com/pnelson/flask-informal",
    license="BSD",
    author="Philip Nelson",
    author_email="me@pnelson.ca",
    description="Adds form abstraction and helpers to your Flask application.",
    long_description=__doc__,
    # Single-module distribution: everything lives in flask_informal.py.
    py_modules=["flask_informal"],
    # Installed unzipped so any package data stays accessible on disk.
    zip_safe=False,
    include_package_data=True,
    platforms="any",
    install_requires=[
        "Flask"
    ],
    test_suite="tests.suite",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ]
)
| bsd-3-clause |
cyrixhero/Django-facebook | docs/docs_env/Lib/encodings/iso2022_jp.py | 816 | 1053 | #
# iso2022_jp.py: Python Unicode Codec for ISO2022_JP
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp')
class Codec(codecs.Codec):
    # Stateless encode/decode entry points, delegated straight to the
    # C-level iso2022_jp codec object created above.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Streaming encoder; MultibyteIncrementalEncoder does the work using
    # the C codec referenced by this class attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Streaming decoder counterpart of IncrementalEncoder.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # File-like reading interface over the same C codec.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # File-like writing interface over the same C codec.
    codec = codec
def getregentry():
    """Return the CodecInfo record that the codecs registry uses to look
    up the 'iso2022_jp' encoding."""
    return codecs.CodecInfo(
        name='iso2022_jp',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| bsd-3-clause |
mfherbst/spack | var/spack/repos/builtin.mock/packages/printing-package/package.py | 5 | 1789 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PrintingPackage(Package):
    """This package prints some output from its install method.

    We use this to test whether that output is properly logged.
    """
    homepage = "http://www.example.com/printing_package"
    url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"

    version('1.0', 'foobarbaz')

    def install(self, spec, prefix):
        # The BEFORE/AFTER markers bracket the build steps so the test
        # harness can check that both ends of the install reached the log.
        # configure/make are injected into scope by Spack's build environment.
        print("BEFORE INSTALL")
        configure('--prefix=%s' % prefix)
        make()
        make('install')
        print("AFTER INSTALL")
| lgpl-2.1 |
40223210/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/main.py | 739 | 10385 | """Unittest main program"""
import sys
import optparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
# Windows it is. Simpler to do a case insensitive match
# a better check would be to check that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
def _convert_names(names):
    """Apply _convert_name to every entry of |names|, preserving order."""
    return list(map(_convert_name, names))
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
       for making test modules conveniently executable.
    """
    USAGE = USAGE_FROM_MODULE

    # defaults for testing
    failfast = catchbreak = buffer = progName = warnings = None

    def __init__(self, module='__main__', defaultTest=None, argv=None,
                    testRunner=None, testLoader=loader.defaultTestLoader,
                    exit=True, verbosity=1, failfast=None, catchbreak=None,
                    buffer=None, warnings=None):
        # |module| may be a dotted name; resolve it to the module object.
        if isinstance(module, str):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv

        self.exit = exit
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.verbosity = verbosity
        self.buffer = buffer
        if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
            # print them anyway unless other warnings settings are
            # specified by the warnings arg or the -W python flag
            self.warnings = 'default'
        else:
            # here self.warnings is set either to the value passed
            # to the warnings args or to None.
            # If the user didn't pass a value self.warnings will
            # be None. This means that the behavior is unchanged
            # and depends on the values passed to -W.
            self.warnings = warnings
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        # Constructing a TestProgram immediately parses argv and runs tests.
        self.parseArgs(argv)
        self.runTests()

    def usageExit(self, msg=None):
        """Print |msg| (if any) plus usage text, then exit with status 2."""
        if msg:
            print(msg)
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        # Options disabled via an explicit False are omitted from the help.
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print(self.USAGE % usage)
        sys.exit(2)

    def parseArgs(self, argv):
        """Interpret the command line: either enter discovery mode or
        populate self.testNames, then build the test suite."""
        if ((len(argv) > 1 and argv[1].lower() == 'discover') or
            (len(argv) == 1 and self.module is None)):
            self._do_discovery(argv[2:])
            return

        parser = self._getOptParser()
        options, args = parser.parse_args(argv[1:])
        self._setAttributesFromOptions(options)

        if len(args) == 0 and self.module is None:
            # this allows "python -m unittest -v" to still work for
            # test discovery. This means -c / -b / -v / -f options will
            # be handled twice, which is harmless but not ideal.
            self._do_discovery(argv[1:])
            return

        if len(args) == 0 and self.defaultTest is None:
            # createTests will load tests from self.module
            self.testNames = None
        elif len(args) > 0:
            self.testNames = _convert_names(args)
            if __name__ == '__main__':
                # to support python -m unittest ...
                self.module = None
        else:
            self.testNames = (self.defaultTest,)
        self.createTests()

    def createTests(self):
        """Build self.test from self.testNames, or from the whole module
        when no names were given."""
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)

    def _getOptParser(self):
        """Construct the OptionParser for the common (non-discovery) flags."""
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        parser.add_option('-q', '--quiet', dest='quiet', default=False,
                          help='Quiet output', action='store_true')
        # A flag is only offered if it wasn't explicitly disabled (False)
        # in the constructor.
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        return parser

    def _setAttributesFromOptions(self, options):
        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer

        if options.verbose:
            self.verbosity = 2
        elif options.quiet:
            self.verbosity = 0

    def _addDiscoveryOptions(self, parser):
        """Add the discovery-only -s / -p / -t options to |parser|."""
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')

    def _do_discovery(self, argv, Loader=None):
        """Run test discovery using the arguments after 'discover'."""
        if Loader is None:
            Loader = lambda: self.testLoader

        # handle command line args for test discovery
        self.progName = '%s discover' % self.progName
        parser = self._getOptParser()
        self._addDiscoveryOptions(parser)

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Positional arguments may also supply start / pattern / top, in
        # that order.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)
        self._setAttributesFromOptions(options)

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)

    def runTests(self):
        """Instantiate the runner (installing the ctrl-C handler first if
        requested), run self.test and, unless exit=False, sys.exit() with
        a status reflecting success."""
        if self.catchbreak:
            installHandler()
        if self.testRunner is None:
            self.testRunner = runner.TextTestRunner
        if isinstance(self.testRunner, type):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer,
                                             warnings=self.warnings)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())

# Alias so callers can write "unittest.main(...)".
main = TestProgram
| gpl-3.0 |
chongtianfeiyu/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_objects.py | 80 | 1682 | r'''
This tests the '_objects' attribute of ctypes instances. '_objects'
holds references to objects that must be kept alive as long as the
ctypes instance, to make sure that the memory buffer is valid.
WARNING: The '_objects' attribute is exposed ONLY for debugging ctypes itself,
it MUST NEVER BE MODIFIED!
'_objects' is initialized to a dictionary on first use, before that it
is None.
Here is an array of string pointers:
>>> from ctypes import *
>>> array = (c_char_p * 5)()
>>> print(array._objects)
None
>>>
The memory block stores pointers to strings, and the strings itself
assigned from Python must be kept.
>>> array[4] = b'foo bar'
>>> array._objects
{'4': b'foo bar'}
>>> array[4]
b'foo bar'
>>>
It gets more complicated when the ctypes instance itself is contained
in a 'base' object.
>>> class X(Structure):
... _fields_ = [("x", c_int), ("y", c_int), ("array", c_char_p * 5)]
...
>>> x = X()
>>> print(x._objects)
None
>>>
The'array' attribute of the 'x' object shares part of the memory buffer
of 'x' ('_b_base_' is either None, or the root object owning the memory block):
>>> print(x.array._b_base_) # doctest: +ELLIPSIS
<ctypes.test.test_objects.X object at 0x...>
>>>
>>> x.array[0] = b'spam spam spam'
>>> x._objects
{'0:2': b'spam spam spam'}
>>> x.array._b_base_._objects
{'0:2': b'spam spam spam'}
>>>
'''
import unittest, doctest, sys
import ctypes.test.test_objects
class TestCase(unittest.TestCase):
    """Runs the doctests embedded in this module's docstring under
    unittest, failing if any doctest fails."""
    def test(self):
        failures, tests = doctest.testmod(ctypes.test.test_objects)
        self.assertFalse(failures, 'doctests failed, see output above')
if __name__ == '__main__':
    # When executed directly, run just the doctests (no unittest runner).
    doctest.testmod(ctypes.test.test_objects)
| lgpl-3.0 |
SystemsBioinformatics/stochpy | stochpy/core2/lex.py | 344 | 40739 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0 - types.StringType no longer exists, so the attribute
    # lookup above raises and we fall back to the builtin string types.
    StringTypes = (str, bytes)
# Extract the code attribute of a function. Different implementations
# are for Python 2/3 compatibility.

if sys.version_info[0] < 3:
    def func_code(f):
        """Return the code object of |f| (Python 2 attribute name)."""
        return f.func_code
else:
    def func_code(f):
        """Return the code object of |f| (Python 3 attribute name)."""
        return f.__code__
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer hits input it cannot tokenize and no default
    error handler (t_error rule) is defined.  The .text attribute holds
    the remaining unmatched input."""
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.  Attributes (type, value,
    lineno, lexpos) are assigned by Lexer.token() rather than here."""
    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)
    def __repr__(self):
        return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Stand-in for a logging.Logger that writes to a file-like object.

    Messages use lazy %-formatting like the logging module; any keyword
    arguments are accepted and ignored.
    """

    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # One write per message, newline-terminated, matching the logging
        # module's "PREFIX: message" shape.
        self.f.write(prefix + (msg % args) + "\n")

    def critical(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    # Lower-severity levels all report at full volume.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that swallows everything.  Overriding __getattribute__ means
    ANY attribute access returns the NullLogger itself, and calling it is
    also a no-op returning itself, so arbitrary chained use is inert."""
    def __getattribute__(self, name):
        return self
    def __call__(self, *args, **kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """The lexer runtime.  Public interface:

       input()  - Store a new string in the lexer
       token()  - Get the next token
       clone()  - Clone the lexer

       lineno   - Current line number
       lexpos   - Current position in the input string
    """
    def __init__(self):
        self.lexre = None           # Master regular expression. This is a list of
                                    # tuples (re, findex) where re is a compiled
                                    # regular expression and findex is a list
                                    # mapping regex group numbers to rules
        self.lexretext = None       # Current regular expression strings
        self.lexstatere = {}        # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}    # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}   # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"   # Current lexer state
        self.lexstatestack = []     # Stack of lexer states
        self.lexstateinfo = None    # State information
        self.lexstateignore = {}    # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}    # Dictionary of error functions for each state
        self.lexreflags = 0         # Optional re compile flags
        self.lexdata = None         # Actual input data (as a string)
        self.lexpos = 0             # Current position in input text
        self.lexlen = 0             # Length of the input text
        self.lexerrorf = None       # Error rule (if any)
        self.lextokens = None       # List of valid tokens
        self.lexignore = ""         # Ignored characters
        self.lexliterals = ""       # Literal characters that can be passed through
        self.lexmodule = None       # Module
        self.lineno = 1             # Current line number
        self.lexoptimize = 0        # Optimized mode

    def clone(self, object=None):
        """Return a shallow copy of this lexer; if |object| is given, every
        rule function is rebound to the same-named method on |object|."""
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, tabfile, outputdir=""):
        """Serialise the lexer tables to <tabfile>.py inside |outputdir|
        so a later run can rebuild the lexer via readtab() without
        re-analysing the rule module.  A module object is a no-op."""
        if isinstance(tabfile, types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir, basetabfilename) + ".py"
        tf = open(filename, "w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile, __version__))
        tf.write("_tabversion = %s\n" % repr(__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = {}
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)

        # Function objects can't be written out directly; store their names
        # instead (see _funcs_to_names / _names_to_funcs).
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = {}
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Rebuild the lexer from a table file (or already-imported table
        module) previously produced by writetab().  |fdict| maps rule
        function names back to the actual callables."""
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                env = {}
                exec("import %s as lextab" % tabfile, env, env)
                lextab = env['lextab']

        # Reject tables written by a different PLY release.
        if getattr(lextab, "_tabversion", "0.0") != __version__:
            raise ImportError("Inconsistent PLY version")

        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for key, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0], lextab._lexreflags | re.VERBOSE), _names_to_funcs(lre[i][1], fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = {}
        for key, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        """Store |s| as the input to tokenize and reset the scan position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch to |state|, swapping in that state's master regexes,
        ignore set and error rule."""
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, "")
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        """Enter |state|, remembering the current state for pop_state()."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        """Return to the most recently pushed state."""
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n

    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input."""
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m: continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos

                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break

                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])

                return newtok
            else:
                # for/else: no master regex matched at this position.
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])

        # Fell off the end of the input.
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def next(self):
        """Python 2 iterator protocol: next token or StopIteration."""
        t = self.token()
        if t is None:
            raise StopIteration
        return t

    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbols visible in the caller |levels| frames up the
    stack: that frame's globals updated with its locals.  Used to recover
    the environment of the lex() call when none is supplied explicitly."""
    try:
        # Raising gives us a traceback whose frame we can walk from;
        # avoids relying on sys._getframe().
        raise RuntimeError
    except RuntimeError:
        e, b, t = sys.exc_info()
        f = t.tb_frame
        while levels > 0:
            f = f.f_back
            levels -= 1
        ldict = f.f_globals.copy()
        # Locals shadow globals when the frame is not at module level.
        if f.f_globals != f.f_locals:
            ldict.update(f.f_locals)
        return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression.  Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    """Combine *relist* into one (or more) compiled master regexes.

    Returns ([(compiled_re, index_func_map)], [regex_text], [index_names]).
    If the combined pattern cannot be compiled (e.g. too many named groups
    for the re module), the list is split in half and each half is combined
    recursively, producing multiple master expressions.
    """
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)
        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]
        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Rule defined by a function: dispatch to it on a match.
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                # String rules named t_*ignore_* produce no token at all.
                if f.find("ignore_") > 0:
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        # Compilation failed: split the rule list in half and retry each side.
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not parts[i] in names and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates lexer specifications (tokens, literals, states
    and t_* rules) found in a module/class dictionary prior to building a
    Lexer.  Uses func_code/StringTypes/PlyLogger/_is_identifier, which are
    py2/py3 compatibility shims defined earlier in this file.
    """
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict = ldict              # symbol dictionary to reflect over
        self.error_func = None          # t_error rule, if any
        self.tokens = []                # declared token names
        self.reflags = reflags          # extra re flags for rule compilation
        self.stateinfo = { 'INITIAL' : 'inclusive'}  # state -> 'inclusive'|'exclusive'
        self.files = {}                 # source files seen (for duplicate checks)
        self.error = 0                  # set to 1 on any validation error
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                # Duplicate tokens are only a warning, not a hard error.
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
                    continue
        except TypeError:
            # literals was not iterable at all.
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1

    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
        # Now build up a list of functions and a list of strings
        self.toknames = { } # Mapping of symbols to token names
        self.funcsym = { } # Symbols defined as functions
        self.strsym = { } # Symbols defined as strings
        self.ignore = { } # Ignore strings by state
        self.errorf = { } # Error functions by state
        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []
        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return
        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname
            if hasattr(t,"__call__"):
                # Rule defined as a function: route to errorf/funcsym buckets.
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                # Rule defined as a plain regex string.
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1
        # Sort the functions by line number
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
        # Sort the strings by regular expression length
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                tokname = self.toknames[fname]
                # Bound methods carry an implicit self, hence one extra arg.
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue
                if not f.__doc__:
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue
                try:
                    # The docstring is the rule's regex; it must compile and
                    # must not match the empty string (would loop forever).
                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
                    if c.match(""):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in f.__doc__:
                        # '#' starts a comment under re.VERBOSE unless escaped.
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1
            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue
                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if (c.match("")):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1
            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1
            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
        for f in self.files:
            self.validate_file(f)

    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the given file.
    # -----------------------------------------------------------------------------
    def validate_file(self,filename):
        import os.path
        base,ext = os.path.splitext(filename)
        if ext != '.py': return # No idea what the file is. Return OK
        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            return # Couldn't find the file. Don't worry about it
        # Match "def t_NAME(" and "t_NAME =" declarations line by line.
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = { }
        linen = 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    """Build and return a Lexer from rule definitions.

    Rules are taken from *module*/*object* if given, otherwise from the
    caller's namespace.  Also installs module-level token()/input()/lexer
    aliases for the constructed lexer.  `nowarn` is accepted but not used
    here — presumably kept for backward compatibility (TODO confirm).
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the lexer
    if object: module = object
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        # No module given: reflect over the caller's own namespace.
        ldict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")
    # In optimized mode, try to load a previously written lextab instead of
    # rebuilding the master regexes from scratch.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass
    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states = %r", linfo.stateinfo)
    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1
    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals
    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo
    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []
        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
        regexs[state] = regex_list
    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags
    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")
    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if not s in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if not s in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error/ignore rules.
            if not s in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
            if not s in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get("INITIAL","")
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)
    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Tokenize *data* (or a file named on the command line, or stdin) and
    print each token to stdout as (type, value, lineno, lexpos)."""
    if not data:
        try:
            filename = sys.argv[1]
            fh = open(filename)
            data = fh.read()
            fh.close()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()
    # Fall back to the module-level input()/token() installed by lex() when
    # no explicit lexer object was supplied.
    _input = lexer.input if lexer else input
    _input(data)
    _token = lexer.token if lexer else token
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write("(%s,%r,%d,%d)\n"
                         % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that installs *r* as the wrapped function's docstring.

    *r* may be a regex string, or a callable whose own docstring is copied.
    """
    def set_doc(f):
        f.__doc__ = r.__doc__ if hasattr(r, "__call__") else r
        return f
    return set_doc

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| gpl-3.0 |
whn09/tensorflow | tensorflow/contrib/seq2seq/python/ops/basic_decoder.py | 3 | 5529 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class of Decoders that may sample to generate the next input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.util import nest
__all__ = [
"BasicDecoderOutput",
"BasicDecoder",
]
class BasicDecoderOutput(
    collections.namedtuple("BasicDecoderOutput", ("rnn_output", "sample_id"))):
  """Output of one `BasicDecoder` step: the raw RNN (or output-layer) output
  and the sample ids produced by the helper."""
  pass
class BasicDecoder(decoder.Decoder):
  """Basic sampling decoder."""

  def __init__(self, cell, helper, initial_state, output_layer=None):
    """Initialize BasicDecoder.

    Args:
      cell: An `RNNCell` instance.
      helper: A `Helper` instance.
      initial_state: A (possibly nested tuple of...) tensors and TensorArrays.
        The initial state of the RNNCell.
      output_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
        `tf.layers.Dense`. Optional layer to apply to the RNN output prior
        to storing the result or sampling.

    Raises:
      TypeError: if `cell`, `helper` or `output_layer` have an incorrect type.
    """
    if not isinstance(cell, core_rnn_cell.RNNCell):
      raise TypeError("cell must be an RNNCell, received: %s" % type(cell))
    if not isinstance(helper, helper_py.Helper):
      raise TypeError("helper must be a Helper, received: %s" % type(helper))
    if (output_layer is not None
        and not isinstance(output_layer, layers_base._Layer)):  # pylint: disable=protected-access
      raise TypeError(
          "output_layer must be a Layer, received: %s" % type(output_layer))
    self._cell = cell
    self._helper = helper
    self._initial_state = initial_state
    self._output_layer = output_layer

  @property
  def batch_size(self):
    # Batch size is dictated by the helper (it owns the inputs).
    return self._helper.batch_size

  def _rnn_output_size(self):
    # Per-timestep output size: the cell's output size, possibly transformed
    # by the optional output layer.
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size. We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = nest.map_structure(
          lambda s: tensor_shape.TensorShape([None]).concatenate(s),
          size)
      layer_output_shape = self._output_layer._compute_output_shape(  # pylint: disable=protected-access
          output_shape_with_unknown_batch)
      return nest.map_structure(lambda s: s[1:], layer_output_shape)

  @property
  def output_size(self):
    # Return the cell output and the id
    return BasicDecoderOutput(
        rnn_output=self._rnn_output_size(),
        sample_id=tensor_shape.TensorShape([]))

  @property
  def output_dtype(self):
    # Assume the dtype of the cell is the output_size structure
    # containing the input_state's first component's dtype.
    # Return that structure and int32 (the id)
    dtype = nest.flatten(self._initial_state)[0].dtype
    return BasicDecoderOutput(
        nest.map_structure(lambda _: dtype, self._rnn_output_size()),
        dtypes.int32)

  def initialize(self, name=None):
    """Initialize the decoder.

    Args:
      name: Name scope for any created operations.

    Returns:
      `(finished, first_inputs, initial_state)`.
    """
    return self._helper.initialize() + (self._initial_state,)

  def step(self, time, inputs, state, name=None):
    """Perform a decoding step.

    Args:
      time: scalar `int32` tensor.
      inputs: A (structure of) input tensors.
      state: A (structure of) state tensors and TensorArrays.
      name: Name scope for any created operations.

    Returns:
      `(outputs, next_state, next_inputs, finished)`.
    """
    with ops.name_scope(name, "BasicDecoderStep", (time, inputs, state)):
      cell_outputs, cell_state = self._cell(inputs, state)
      if self._output_layer is not None:
        cell_outputs = self._output_layer(cell_outputs)
      # The helper both samples ids from the outputs and decides what the
      # next inputs / finished flags are.
      sample_ids = self._helper.sample(
          time=time, outputs=cell_outputs, state=cell_state)
      (finished, next_inputs, next_state) = self._helper.next_inputs(
          time=time,
          outputs=cell_outputs,
          state=cell_state,
          sample_ids=sample_ids)
    outputs = BasicDecoderOutput(cell_outputs, sample_ids)
    return (outputs, next_state, next_inputs, finished)
| apache-2.0 |
elkingtonmcb/rethinkdb | test/regression/issue_2738.py | 36 | 2159 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import os, sys, time, threading, traceback
startTime = time.time()

# Make the shared test utilities (driver, utils, ...) importable.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse

r = utils.import_python_driver()

# Parse the standard scenario command-line flags.
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))

numNodes = 2

print("Starting cluster of %d servers (%.2fs)" % (numNodes, time.time() - startTime))
with driver.Cluster(initial_servers=numNodes, output_folder='.', wait_until_ready=True, command_prefix=command_prefix, extra_options=serve_options) as cluster:
    server1 = cluster[0]
    server2 = cluster[1]

    print("Starting creation/destruction of db1 on server1 (%.2fs)" % (time.time() - startTime))
    conn1 = r.connect("localhost", server1.driver_port)
    stopping = False

    # Background thread: hammer server1 with db1 create/drop until `stopping`
    # is set by the main thread.
    def other_thread():
        try:
            while not stopping:
                r.db_create("db1").run(conn1)
                r.db_drop("db1").run(conn1)
        except Exception, e:  # NOTE: Python 2-only except syntax; this script targets py2 (see xrange below)
            traceback.print_exc(e)
            sys.exit("Aborting because of error in side thread")
    thr = threading.Thread(target=other_thread)
    thr.start()

    print("Starting creation/destruction of db2 on server2 (%.2fs)" % (time.time() - startTime))
    conn2 = r.connect("localhost", server2.driver_port)
    for i in xrange(1, 501):
        r.db_create("db2").run(conn2)
        r.db_drop("db2").run(conn2)
        # Regression check for issue #2738: concurrent db create/drop on two
        # servers must not surface any cluster issues.
        issues = list(r.db('rethinkdb').table('current_issues').run(conn2))
        assert len(issues) == 0, 'Issues detected during testing: %s' % issues
        if i % 50 == 0 or i == 1:
            print(str(i), end='.. ')
            sys.stdout.flush()
    print("\nTesting completed (%.2fs)" % (time.time() - startTime))
    stopping = True
    thr.join()

# -- ending

print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
ghisvail/vispy | vispy/visuals/visual.py | 17 | 21410 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Definitions
===========
Visual : an object that (1) can be drawn on-screen, (2) can be manipulated
by configuring the coordinate transformations that it uses.
View : a special type of visual that (1) draws the contents of another visual,
(2) using a different set of transforms. Views have only the basic visual
interface (draw, bounds, attach, etc.) and lack access to the specific features
of the visual they are linked to (for example, LineVisual has a ``set_data()``
method, but there is no corresponding method on a view of a LineVisual).
Class Structure
===============
* `BaseVisual` - provides transforms and view creation
This class lays out the basic API for all visuals: ``draw()``, ``bounds()``,
``view()``, and ``attach()`` methods, as well as a `TransformSystem` instance
that determines where the visual will be drawn.
* `Visual` - defines a shader program to draw.
Subclasses are responsible for supplying the shader code and configuring
program variables, including transforms.
* `VisualView` - clones the shader program from a Visual instance.
Instances of `VisualView` contain their own shader program,
transforms and filter attachments, and generally behave like a normal
instance of `Visual`.
* `CompoundVisual` - wraps multiple Visual instances.
These visuals provide no program of their own, but instead rely on one or
more internally generated `Visual` instances to do their drawing. For
example, a PolygonVisual consists of an internal LineVisual and
MeshVisual.
* `CompoundVisualView` - wraps multiple VisualView instances.
This allows a `CompoundVisual` to be viewed with a different set of
transforms and filters.
Making Visual Subclasses
========================
When making subclasses of `Visual`, it is only necessary to reimplement the
``_prepare_draw()``, ``_prepare_transforms()``, and ``_compute_bounds()``
methods. These methods will be called by the visual automatically when it is
needed for itself or for a view of the visual.
It is important to remember
when implementing these methods that most changes made to the visual's shader
program should also be made to the programs for each view. To make this easier,
the visual uses a `MultiProgram`, which allows all shader programs across the
visual and its views to be accessed simultaneously. For example::
def _prepare_draw(self, view):
# This line applies to the visual and all of its views
self.shared_program['a_position'] = self._vbo
# This line applies only to the view that is about to be drawn
view.view_program['u_color'] = (1, 1, 1, 1)
Under most circumstances, it is not necessary to reimplement `VisualView`
because a view will directly access the ``_prepare`` and ``_compute`` methods
from the visual it is viewing. However, if the `Visual` to be viewed is a
subclass that reimplements other methods such as ``draw()`` or ``bounds()``,
then it will be necessary to provide a new matching `VisualView` subclass.
Making CompoundVisual Subclasses
================================
Compound visual subclasses are generally very easy to construct::
class PlotLineVisual(visuals.CompoundVisual):
def __init__(self, ...):
self._line = LineVisual(...)
self._point = PointVisual(...)
visuals.CompoundVisual.__init__(self, [self._line, self._point])
A compound visual will automatically handle forwarding transform system changes
and filter attachments to its internally-wrapped visuals. To the user, this
will appear to behave as a single visual.
"""
from __future__ import division
import weakref
from .. import gloo
from ..util.event import EmitterGroup, Event
from ..util import logger, Frozen
from .shaders import StatementList, MultiProgram
from .transforms import TransformSystem
class VisualShare(object):
    """State shared by every view of a single visual.

    Holds the GL state flags, the per-axis bounds cache, a weak registry of
    all views, the filters applied to all views, and the visibility flag.
    """
    def __init__(self):
        # Weak registry so views do not keep the visual alive.
        self.views = weakref.WeakKeyDictionary()
        # Cached bounds per axis.  Note: in some cases bounds may need to be
        # computed independently for each view; that is not handled yet.
        self.bounds = {}
        # GL state variables (blending, depth test, etc.).
        self.gl_state = {}
        # Filters that should be applied to all views.
        self.filters = []
        self.visible = True
class BaseVisual(Frozen):
    """Superclass for all visuals.

    This class provides:

        * A TransformSystem.
        * Two events: `update` and `bounds_change`.
        * Minimal framework for creating views of the visual.
        * A data structure that is shared between all views of the visual.
        * Abstract `draw`, `bounds`, `attach`, and `detach` methods.

    Parameters
    ----------
    vshare : instance of VisualShare | None
        The visual share.

    Notes
    -----
    When used in the scenegraph, all Visual classes are mixed with
    `vispy.scene.Node` in order to implement the methods, attributes and
    capabilities required for their usage within it.

    This subclasses Frozen so that subclasses can easily freeze their
    properties.
    """
    def __init__(self, vshare=None):
        # Subclasses may pre-set _view_class to use a specialized view type.
        self._view_class = getattr(self, '_view_class', VisualView)
        self._vshare = VisualShare() if vshare is None else vshare
        # Register this instance in the shared weak view registry.
        self._vshare.views[self] = None
        self.events = EmitterGroup(source=self,
                                   auto_connect=True,
                                   update=Event,
                                   bounds_change=Event
                                   )
        self._transforms = None
        # Assign via the property so change callbacks get connected.
        self.transforms = TransformSystem()

    @property
    def transform(self):
        # Shorthand for the first (primary) visual transform.
        return self.transforms.visual_transform.transforms[0]

    @transform.setter
    def transform(self, tr):
        self.transforms.visual_transform = tr

    @property
    def transforms(self):
        return self._transforms

    @transforms.setter
    def transforms(self, trs):
        if trs is self._transforms:
            return
        # Re-wire the change callback from the old system to the new one.
        if self._transforms is not None:
            self._transforms.changed.disconnect(self._transform_changed)
        self._transforms = trs
        trs.changed.connect(self._transform_changed)
        self._transform_changed()

    def get_transform(self, map_from='visual', map_to='render'):
        """Return a transform mapping between any two coordinate systems.

        Parameters
        ----------
        map_from : str
            The starting coordinate system to map from. Must be one of: visual,
            scene, document, canvas, framebuffer, or render.
        map_to : str
            The ending coordinate system to map to. Must be one of: visual,
            scene, document, canvas, framebuffer, or render.
        """
        return self.transforms.get_transform(map_from, map_to)

    @property
    def visible(self):
        # Visibility is shared across all views of the visual.
        return self._vshare.visible

    @visible.setter
    def visible(self, v):
        if v != self._vshare.visible:
            self._vshare.visible = v
            self.update()

    def view(self):
        """Return a new view of this visual.
        """
        return self._view_class(self)

    def draw(self):
        raise NotImplementedError(self)

    def attach(self, filt, view=None):
        """Attach a Filter to this visual.

        Each filter modifies the appearance or behavior of the visual.

        Parameters
        ----------
        filt : object
            The filter to attach.
        view : instance of VisualView | None
            The view to use.
        """
        raise NotImplementedError(self)

    def detach(self, filt, view=None):
        """Detach a filter.

        Parameters
        ----------
        filt : object
            The filter to detach.
        view : instance of VisualView | None
            The view to use.
        """
        raise NotImplementedError(self)

    def bounds(self, axis, view=None):
        """Get the bounds of the Visual

        Parameters
        ----------
        axis : int
            The axis.
        view : instance of VisualView
            The view to use.
        """
        if view is None:
            view = self
        # Bounds are computed lazily and cached in the shared state.
        if axis not in self._vshare.bounds:
            self._vshare.bounds[axis] = self._compute_bounds(axis, view)
        return self._vshare.bounds[axis]

    def _compute_bounds(self, axis, view):
        raise NotImplementedError(self)

    def _bounds_changed(self):
        # Invalidate the shared bounds cache.
        self._vshare.bounds.clear()

    def update(self):
        """Update the Visual"""
        self.events.update()

    def _transform_changed(self, event=None):
        self.update()
class BaseVisualView(object):
    """Base class for a view on a visual.

    This class must be mixed with another Visual class to work properly.  It
    works mainly by forwarding the _prepare_draw, _prepare_transforms and
    _compute_bounds hooks to the viewed visual.
    """
    def __init__(self, visual):
        self._visual = visual

    @property
    def visual(self):
        """The Visual instance this view wraps."""
        return self._visual

    def _prepare_draw(self, view=None):
        # Delegate draw preparation to the underlying visual.
        self._visual._prepare_draw(view=view)

    def _prepare_transforms(self, view):
        # Delegate transform preparation to the underlying visual.
        self._visual._prepare_transforms(view)

    def _compute_bounds(self, axis, view):
        # NOTE(review): the visual's return value is discarded here (this
        # method yields None); preserved as-is — confirm before relying on
        # a view's bounds cache.
        self._visual._compute_bounds(axis, view)

    def __repr__(self):
        return '<{} on {!r}>'.format(self.__class__.__name__, self._visual)
class Visual(BaseVisual):
    """Base class for all visuals that can be drawn using a single shader
    program.

    This class creates a MultiProgram, which is an object that
    behaves like a normal shader program (you can assign shader code, upload
    values, set template variables, etc.) but internally manages multiple
    ModularProgram instances, one per view.

    Subclasses generally only need to reimplement _compute_bounds,
    _prepare_draw, and _prepare_transforms.

    Parameters
    ----------
    vcode : str
        Vertex shader code.
    fcode : str
        Fragment shader code.
    program : instance of Program | None
        The program to use. If None, a program will be constructed using
        ``vcode`` and ``fcode``.
    vshare : instance of VisualShare | None
        The visual share, if necessary.
    """
    def __init__(self, vcode='', fcode='', program=None, vshare=None):
        self._view_class = VisualView
        BaseVisual.__init__(self, vshare)
        if vshare is None:
            self._vshare.draw_mode = None
            self._vshare.index_buffer = None
            if program is None:
                self._vshare.program = MultiProgram(vcode, fcode)
            else:
                self._vshare.program = program
                if len(vcode) > 0 or len(fcode) > 0:
                    raise ValueError("Cannot specify both program and "
                                     "vcode/fcode arguments.")
        self._program = self._vshare.program.add_program()
        self._prepare_transforms(self)
        self._filters = []
        self._hooks = {}

    def set_gl_state(self, preset=None, **kwargs):
        """Define the set of GL state parameters to use when drawing

        Parameters
        ----------
        preset : str
            Preset to use.
        **kwargs : dict
            Keyword arguments to `gloo.set_state`.
        """
        # Note: this *replaces* any previously configured GL state;
        # use update_gl_state() to merge.
        self._vshare.gl_state = kwargs
        self._vshare.gl_state['preset'] = preset

    def update_gl_state(self, *args, **kwargs):
        """Modify the set of GL state parameters to use when drawing

        Parameters
        ----------
        *args : tuple
            Optional single positional argument giving the preset name.
        **kwargs : dict
            Keyword arguments merged into the current GL state.
        """
        if len(args) == 1:
            self._vshare.gl_state['preset'] = args[0]
        elif len(args) != 0:
            raise TypeError("Only one positional argument allowed.")
        self._vshare.gl_state.update(kwargs)

    def _compute_bounds(self, axis, view):
        """Return the (min, max) bounding values of this visual along *axis*
        in the local coordinate system.
        """
        return None

    def _prepare_draw(self, view=None):
        """This visual is about to be drawn.

        Visuals should implement this method to ensure that all program
        and GL state variables are updated immediately before drawing.

        Return False to indicate that the visual should not be drawn.
        """
        return True

    def _prepare_transforms(self, view):
        """This method is called whenever the TransformSystem instance is
        changed for a view.

        Assign a view's transforms to the proper shader template variables
        on the view's shader program.

        Note that each view has its own TransformSystem. In this method we
        connect the appropriate mapping functions from the view's
        TransformSystem to the view's program.
        """
        raise NotImplementedError()
        # Todo: this method can be removed if we somehow enable the shader
        # to specify exactly which transform functions it needs by name. For
        # example:
        #
        #     // mapping function is automatically defined from the
        #     // corresponding transform in the view's TransformSystem
        #     gl_Position = visual_to_render(a_position);
        #

    @property
    def shared_program(self):
        # The MultiProgram shared by this visual and all of its views.
        return self._vshare.program

    @property
    def view_program(self):
        # The per-view ModularProgram instance.
        return self._program

    @property
    def _draw_mode(self):
        return self._vshare.draw_mode

    @_draw_mode.setter
    def _draw_mode(self, m):
        self._vshare.draw_mode = m

    @property
    def _index_buffer(self):
        return self._vshare.index_buffer

    @_index_buffer.setter
    def _index_buffer(self, buf):
        self._vshare.index_buffer = buf

    def draw(self):
        """Draw this visual using its shader program and GL state."""
        if not self.visible:
            return
        self._configure_gl_state()
        if self._prepare_draw(view=self) is False:
            return
        if self._vshare.draw_mode is None:
            raise ValueError("_draw_mode has not been set for visual %r" %
                             self)
        try:
            self._program.draw(self._vshare.draw_mode,
                               self._vshare.index_buffer)
        except Exception:
            # FIX: logger.warn is a deprecated alias of warning(); also use
            # lazy %-style arguments instead of eager string formatting.
            logger.warning("Error drawing visual %r", self)
            raise

    def _configure_gl_state(self):
        gloo.set_state(**self._vshare.gl_state)

    def _get_hook(self, shader, name):
        """Return a FunctionChain that Filters may use to modify the program.

        *shader* should be "frag" or "vert"
        *name* should be "pre" or "post"
        """
        assert name in ('pre', 'post')
        key = (shader, name)
        if key in self._hooks:
            return self._hooks[key]
        hook = StatementList()
        if shader == 'vert':
            self.view_program.vert[name] = hook
        elif shader == 'frag':
            self.view_program.frag[name] = hook
        self._hooks[key] = hook
        return hook

    def attach(self, filt, view=None):
        """Attach a Filter to this visual

        Each filter modifies the appearance or behavior of the visual.

        Parameters
        ----------
        filt : object
            The filter to attach.
        view : instance of VisualView | None
            The view to use.
        """
        if view is None:
            # Shared filter: attach to every existing view.  (Loop variable
            # renamed so it no longer shadows the ``view`` parameter.)
            self._vshare.filters.append(filt)
            for v in self._vshare.views.keys():
                filt._attach(v)
        else:
            view._filters.append(filt)
            filt._attach(view)

    def detach(self, filt, view=None):
        """Detach a filter.

        Parameters
        ----------
        filt : object
            The filter to detach.
        view : instance of VisualView | None
            The view to use.
        """
        if view is None:
            self._vshare.filters.remove(filt)
            for v in self._vshare.views.keys():
                filt._detach(v)
        else:
            view._filters.remove(filt)
            filt._detach(view)
class VisualView(BaseVisualView, Visual):
    """A view on another Visual instance.

    View instances are created by calling ``visual.view()``.

    Because this is a subclass of `Visual`, all instances of `VisualView`
    define their own shader program (which is a clone of the viewed visual's
    program), transforms, and filter attachments.
    """
    def __init__(self, visual):
        BaseVisualView.__init__(self, visual)
        # Reuse the viewed visual's shared state (program, GL state, ...).
        Visual.__init__(self, vshare=visual._vshare)
        # Filters already shared by the visual must also act on this view.
        for shared_filter in self._vshare.filters:
            shared_filter._attach(self)
class CompoundVisual(BaseVisual):
    """Visual consisting entirely of sub-visuals.

    To the user, a compound visual behaves exactly like a normal visual--it
    has a transform system, draw() and bounds() methods, etc. Internally, the
    compound visual automatically manages proxying these transforms and methods
    to its sub-visuals.

    Parameters
    ----------
    subvisuals : list of BaseVisual instances
        The list of visuals to be combined in this compound visual.
    """
    def __init__(self, subvisuals):
        self._view_class = CompoundVisualView
        self._subvisuals = []
        BaseVisual.__init__(self)
        for v in subvisuals:
            self.add_subvisual(v)
        self.freeze()

    def add_subvisual(self, visual):
        """Add a subvisual

        Parameters
        ----------
        visual : instance of Visual
            The visual to add.
        """
        # Share this compound's transforms with the subvisual and let it
        # hook them up before it is registered and drawn.
        visual.transforms = self.transforms
        visual._prepare_transforms(visual)
        self._subvisuals.append(visual)
        # Re-emit subvisual updates as updates of the compound.
        visual.events.update.connect(self._subv_update)
        self.update()

    def remove_subvisual(self, visual):
        """Remove a subvisual

        Parameters
        ----------
        visual : instance of Visual
            The visual to remove.
        """
        visual.events.update.disconnect(self._subv_update)
        self._subvisuals.remove(visual)
        self.update()

    def _subv_update(self, event):
        # A subvisual changed; propagate as an update of the compound.
        self.update()

    def _transform_changed(self, event=None):
        # Keep every subvisual's transforms in sync with the compound's.
        for v in self._subvisuals:
            v.transforms = self.transforms
        BaseVisual._transform_changed(self)

    def draw(self):
        """Draw the visual
        """
        if not self.visible:
            return
        if self._prepare_draw(view=self) is False:
            return
        # Draw subvisuals in insertion order, skipping invisible ones.
        for v in self._subvisuals:
            if v.visible:
                v.draw()

    def _prepare_draw(self, view):
        # Nothing to prepare for the compound itself; subvisuals prepare
        # themselves inside their own draw() calls.
        pass

    def _prepare_transforms(self, view):
        for v in view._subvisuals:
            v._prepare_transforms(v)

    def set_gl_state(self, preset=None, **kwargs):
        """Define the set of GL state parameters to use when drawing

        Parameters
        ----------
        preset : str
            Preset to use.
        **kwargs : dict
            Keyword arguments to `gloo.set_state`.
        """
        # Forwarded to every subvisual.
        for v in self._subvisuals:
            v.set_gl_state(preset=preset, **kwargs)

    def update_gl_state(self, *args, **kwargs):
        """Modify the set of GL state parameters to use when drawing

        Parameters
        ----------
        *args : tuple
            Arguments.
        **kwargs : dict
            Keyword argments.
        """
        # Forwarded to every subvisual.
        for v in self._subvisuals:
            v.update_gl_state(*args, **kwargs)

    def attach(self, filt, view=None):
        """Attach a Filter to this visual

        Each filter modifies the appearance or behavior of the visual.

        Parameters
        ----------
        filt : object
            The filter to attach.
        view : instance of VisualView | None
            The view to use.
        """
        # The filter is attached to each subvisual individually.
        for v in self._subvisuals:
            v.attach(filt, v)

    def detach(self, filt, view=None):
        """Detach a filter.

        Parameters
        ----------
        filt : object
            The filter to detach.
        view : instance of VisualView | None
            The view to use.
        """
        for v in self._subvisuals:
            v.detach(filt, v)

    def _compute_bounds(self, axis, view):
        # Union of the bounds of all visible subvisuals along *axis*;
        # None if no subvisual reports bounds.
        bounds = None
        for v in view._subvisuals:
            if v.visible:
                vb = v.bounds(axis)
                if bounds is None:
                    bounds = vb
                elif vb is not None:
                    bounds = [min(bounds[0], vb[0]), max(bounds[1], vb[1])]
        return bounds
class CompoundVisualView(BaseVisualView, CompoundVisual):
    """A view on a CompoundVisual, built from a view on each subvisual."""
    def __init__(self, visual):
        BaseVisualView.__init__(self, visual)
        # Each sub-visual of the viewed compound gets its own view here.
        subviews = [subvisual.view() for subvisual in visual._subvisuals]
        CompoundVisual.__init__(self, subviews)
        # Filters shared by the viewed visual must act on all our subviews.
        for shared_filter in self._vshare.filters:
            for subview in self._subvisuals:
                shared_filter._attach(subview)
| bsd-3-clause |
leyyin/university-PC | elearning/course/views.py | 1 | 7555 | #!/usr/bin/python
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Q
from django.forms import modelformset_factory
from django.shortcuts import render, redirect
from elearning.course.forms import SimpleCourseForm, AdminEditCourseForm, TeacherEditCourseForm, AssignmentForm, \
ReadOnlyAssignmentForm, AssignStudentsForm
from elearning.models import Enrollment, Course, AssistantCourse, Assignment, StudentAssignment
from elearning.utils import get_current_user, create_forums_course
@login_required
def add_course(request):
    """Create a new course owned by the requesting user.

    GET renders an empty form; a valid POST creates the course (plus its
    forums) and redirects to the index; an invalid POST re-renders the
    bound form with its errors.
    """
    if request.method != 'POST':
        return render(request, 'course/add_course.html',
                      {'form': SimpleCourseForm()})
    user = get_current_user(request)
    form = SimpleCourseForm(request.POST)
    if form.is_valid():
        course = Course.objects.create(name=form.cleaned_data['name'],
                                       subject=form.cleaned_data['subject'],
                                       teacher=user)
        create_forums_course(course)
        messages.success(request, "Course created")
        return redirect('index')
    return render(request, 'course/add_course.html', {'form': form})
@login_required
@user_passes_test(lambda u: u.groups.filter(name__in=['assistant', 'teacher']).exists())
def add_assignment(request, id):
    """Create an assignment for course *id*.

    Only the course's teacher or one of its assistants may add assignments.
    """
    course = Course.objects.get(id=id)
    user = get_current_user(request)
    # SECURITY FIX: this ownership check previously ran only on GET, so any
    # teacher/assistant could POST an assignment into an arbitrary course.
    # It now guards both the form render and the submission.
    if course.teacher != user and user not in course.assistants.all():
        messages.error(request, "You are not allowed to add an assignment to this course.")
        return redirect('index')
    if request.method == "POST":
        form = AssignmentForm(request.POST)
        if form.is_valid():
            assignment = form.save(commit=False)
            assignment.course = course
            assignment.save()
            messages.success(request, "Assignment created")
        else:
            messages.error(request, "Assignment not created")
        return redirect('index')
    form = AssignmentForm()
    return render(request, 'course/add_assignment.html', {'form': form, 'course': course})
@login_required
@user_passes_test(lambda u: u.groups.filter(name__in=['admin', 'teacher']).exists())
def see_courses(request):
    """List and bulk-edit courses via a model formset.

    Admins see all courses (with delete and teacher reassignment);
    teachers see and edit only their own courses.
    """
    user = get_current_user(request)
    user_is_admin = request.user.groups.filter(name='admin').exists()
    can_delete = True if user_is_admin else False
    used_form = AdminEditCourseForm if user_is_admin else TeacherEditCourseForm
    # Admins may also edit the 'teacher' field; teachers may not.
    if user_is_admin:
        CourseFormSet = modelformset_factory(Course, fields=('name', 'subject', 'teacher', 'assistants', 'students'),
                                            can_delete=can_delete, form=used_form, max_num=1)
    else:
        CourseFormSet = modelformset_factory(Course, fields=('name', 'subject', 'assistants', 'students'),
                                            can_delete=can_delete, form=used_form, max_num=1)
    if request.method == 'POST':
        formset = CourseFormSet(request.POST, request.FILES)
        if user_is_admin:
            if formset.is_valid():
                for form in formset:
                    course = form.save(commit=False)
                    # TODO get rid of this UGLY WAY
                    # (enrollments/assistantships are wiped and recreated
                    # from the submitted form on every save)
                    Enrollment.objects.filter(course=course).delete()
                    for student in form.clean_students():
                        Enrollment.objects.create(user=student, course=course)
                    AssistantCourse.objects.filter(course=course).delete()
                    for assistant in form.clean_assistants():
                        AssistantCourse.objects.create(user=assistant, course=course)
                    course.save()
                messages.success(request, "Courses successfully edited.")
            else:
                messages.error(request, str(formset.errors))
            return redirect('index')
        else:
            if formset.is_valid():
                for form in formset:
                    course = form.save(commit=False)
                    # Teachers may only ever save a course as their own.
                    course.teacher = user
                    Enrollment.objects.filter(course=course).delete()
                    for student in form.clean_students():
                        Enrollment.objects.create(user=student, course=course)
                    AssistantCourse.objects.filter(course=course).delete()
                    for assistant in form.clean_assistants():
                        AssistantCourse.objects.create(user=assistant, course=course)
                    course.save()
                messages.success(request, "Courses successfully edited.")
            else:
                messages.error(request, str(formset.errors))
            return redirect('index')
    else:
        if user_is_admin:
            formset = CourseFormSet(queryset=Course.objects.all())
        else:
            formset = CourseFormSet(queryset=Course.objects.filter(teacher=user))
    return render(request, 'course/see_courses.html', {'formset': formset})
@login_required
def see_assignments(request, id):
    """List (and, for course staff, edit) the assignments of course *id*.

    The teacher and assistants get an editable formset with deletion;
    students get a read-only formset restricted to their own assignments.
    """
    user = get_current_user(request)
    course = Course.objects.get(id=id)
    can_delete = user == course.teacher or user in course.assistants.all()
    AssignmentFormSet = modelformset_factory(Assignment,
                                             fields=('id', 'name', 'description', 'deadline', 'type', 'group'),
                                             can_delete=can_delete, form=AssignmentForm, max_num=0)
    if request.method == 'POST':
        formset = AssignmentFormSet(request.POST, request.FILES)
        if formset.is_valid():
            formset.save()
            messages.success(request, "Assignments successfully saved.")
        return redirect('index')
    else:
        if user.is_in_group("student"):
            AssignmentFormSet.form = ReadOnlyAssignmentForm
            # BUG FIX: the original combined the lookups with Python's
            # ``and``, which evaluates to the right-hand Q object and
            # silently dropped the per-student restriction.  Q objects
            # must be combined with ``&``.
            formset = AssignmentFormSet(
                queryset=Assignment.objects.filter(
                    Q(students__user=request.user) & Q(course=course)).all())
        else:
            formset = AssignmentFormSet(queryset=Assignment.objects.filter(course=course).all())
        if len(formset) == 0:
            messages.info(request, "There are no assignments for this course")
            return redirect('index')
        else:
            return render(request, 'course/see_assignments.html',
                          {'formset': formset, 'course': course, 'user_elearning': user})
@login_required()
def give_assignment_to_students(request, assignment_id):
    """Hand an existing assignment out to the students selected in the form.

    NOTE(review): as in the original, the success message is emitted even
    when the submitted form is invalid — confirm whether that is intended.
    """
    assignment = Assignment.objects.get(id=assignment_id)
    if request.method == "POST":
        form = AssignStudentsForm(assignment, request.POST, request.FILES)
        if form.is_valid():
            # get_or_create keeps this idempotent for already-assigned students.
            for student in form.clean_students():
                StudentAssignment.objects.get_or_create(user=student, assignment=assignment, grade=0)
        messages.success(request, "Assignment successfully given.")
        return redirect('index')
    form = AssignStudentsForm(assignment)
    return render(request, 'course/give_assignment_to_students.html',
                  {'form': form, 'id': assignment_id})
# TODO
@login_required()
def give_assignment_to_groups(request, assignment_id):
    """Unimplemented stub: the POST branch does nothing yet, and GET
    reports that no groups are available before redirecting."""
    if request.method == "POST":
        pass
    else:
        messages.info(request, 'There are no available groups.')
    return redirect('index')
| mit |
sid-kap/pants | tests/python/pants_test/tasks/test_scalastyle_integration.py | 6 | 1678 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScalastyleIntegrationTest(PantsRunIntegrationTest):
  """End-to-end check that scalastyle results are written to, and then
  replayed from, the local artifact cache."""

  def test_scalastyle_cached(self):
    with temporary_dir(root_dir=self.workdir_root()) as cache:
      with temporary_dir(root_dir=self.workdir_root()) as workdir:
        scalastyle_args = [
          'clean-all',
          'compile.scalastyle',
          "--cache-write-to=['{}']".format(cache),
          "--cache-read-from=['{}']".format(cache),
          'examples/tests/scala/org/pantsbuild/example/hello/welcome',
          '-ldebug'
        ]
        # First run: the cache is cold, so the task must write its result.
        pants_run = self.run_pants_with_workdir(scalastyle_args, workdir)
        self.assert_success(pants_run)
        self.assertIn('abc_Scalastyle_compile_scalastyle will write to local artifact cache',
                      pants_run.stdout_data)
        # Second run with identical inputs: the task should hit the cache.
        pants_run = self.run_pants_with_workdir(scalastyle_args, workdir)
        self.assert_success(pants_run)
        self.assertIn('abc_Scalastyle_compile_scalastyle will read from local artifact cache',
                      pants_run.stdout_data)
        # make sure we are *only* reading from the cache and not also writing,
        # implying there was a cache hit
        self.assertNotIn('abc_Scalastyle_compile_scalastyle will write to local artifact cache',
                         pants_run.stdout_data)
| apache-2.0 |
rcbops/glance-buildpackage | glance/registry/db/api.py | 1 | 20568 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Defines interface for DB access
"""
import logging
from sqlalchemy import asc, create_engine, desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import or_, and_
from glance.common import cfg
from glance.common import exception
from glance.common import utils
from glance.registry.db import models
# Lazily-initialized SQLAlchemy engine and session factory; both are set up
# by configure_db() / get_session() below.
_ENGINE = None
_MAKER = None
BASE = models.BASE
sa_logger = None
logger = logging.getLogger(__name__)
# attributes common to all models
BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at',
                        'deleted'])
IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size',
                                      'disk_format', 'container_format',
                                      'min_disk', 'min_ram', 'is_public',
                                      'location', 'checksum', 'owner',
                                      'protected'])
# Legal values for validate_image(); see that function for the rules.
CONTAINER_FORMATS = ['ami', 'ari', 'aki', 'bare', 'ovf']
DISK_FORMATS = ['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi',
                'iso']
STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete',
            'deleted']
# Configuration options registered against the conf object in configure_db().
db_opts = [
    cfg.IntOpt('sql_idle_timeout', default=3600),
    cfg.StrOpt('sql_connection', default='sqlite:///glance.sqlite'),
]
def configure_db(conf):
    """
    Establish the database, create an engine if needed, and
    register the models.

    :param conf: Mapping of configuration options
    """
    global _ENGINE, sa_logger, logger
    # Idempotent: once the module-level engine exists, later calls are no-ops.
    if not _ENGINE:
        conf.register_opts(db_opts)
        timeout = conf.sql_idle_timeout
        sql_connection = conf.sql_connection
        try:
            _ENGINE = create_engine(sql_connection, pool_recycle=timeout)
        except Exception, err:
            msg = _("Error configuring registry database with supplied "
                    "sql_connection '%(sql_connection)s'. "
                    "Got error:\n%(err)s") % locals()
            logger.error(msg)
            raise
        # Route SQLAlchemy's engine logging according to debug/verbose flags.
        sa_logger = logging.getLogger('sqlalchemy.engine')
        if conf.debug:
            sa_logger.setLevel(logging.DEBUG)
        elif conf.verbose:
            sa_logger.setLevel(logging.INFO)
        models.register_models(_ENGINE)
def check_mutate_authorization(context, image_ref):
    """Raise NotAuthorized unless *context* is allowed to modify *image_ref*."""
    if context.is_image_mutable(image_ref):
        return
    logger.info(_("Attempted to modify image user did not own."))
    msg = _("You do not own this image")
    raise exception.NotAuthorized(msg)
def get_session(autocommit=True, expire_on_commit=False):
    """Helper method to grab session

    configure_db() must have been called first (asserts the engine exists).
    Note the session factory is created once: the autocommit /
    expire_on_commit arguments only take effect on the first call.
    """
    global _MAKER, _ENGINE
    if not _MAKER:
        assert _ENGINE
        _MAKER = sessionmaker(bind=_ENGINE,
                              autocommit=autocommit,
                              expire_on_commit=expire_on_commit)
    return _MAKER()
def image_create(context, values):
    """Create an image from the values dictionary.

    :param context: Request context
    :param values: Mapping of image attributes (may include 'properties')
    """
    return _image_update(context, values, None, False)
def image_update(context, image_id, values, purge_props=False):
    """
    Set the given properties on an image and update it.

    :raises NotFound if image does not exist.
    """
    # purge_props=True soft-deletes existing properties absent from *values*.
    return _image_update(context, values, image_id, purge_props)
def image_destroy(context, image_id):
    """Destroy the image or raise if it does not exist.

    Soft-deletes the image row and cascades the soft delete to its
    properties and memberships, all inside one transaction.
    """
    session = get_session()
    with session.begin():
        image_ref = image_get(context, image_id, session=session)
        # Perform authorization check
        check_mutate_authorization(context, image_ref)
        image_ref.delete(session=session)
        for prop_ref in image_ref.properties:
            image_property_delete(context, prop_ref, session=session)
        for memb_ref in image_ref.members:
            image_member_delete(context, memb_ref, session=session)
def image_get(context, image_id, session=None, force_show_deleted=False):
    """Get an image or raise if it does not exist.

    :param session: A SQLAlchemy session to use (if present)
    :param force_show_deleted: include deleted rows even if the context
                               would normally hide them
    :raises NotFound: no matching image
    :raises NotAuthorized: image exists but is not visible to the context
    """
    session = session or get_session()
    try:
        # Eager-load properties and members to avoid per-access queries.
        query = session.query(models.Image).\
                options(joinedload(models.Image.properties)).\
                options(joinedload(models.Image.members)).\
                filter_by(id=image_id)
        # filter out deleted images if context disallows it
        if not force_show_deleted and not can_show_deleted(context):
            query = query.filter_by(deleted=False)
        image = query.one()
    except exc.NoResultFound:
        raise exception.NotFound("No image found with ID %s" % image_id)
    # Make sure they can look at it
    if not context.is_image_visible(image):
        raise exception.NotAuthorized("Image not visible to you")
    return image
def image_get_all(context, filters=None, marker=None, limit=None,
                  sort_key='created_at', sort_dir='desc'):
    """
    Get all images that match zero or more filters.

    :param filters: dict of filter keys and values. If a 'properties'
                    key is present, it is treated as a dict of key/value
                    filters on the image properties attribute
    :param marker: image id after which to start page
    :param limit: maximum number of images to return
    :param sort_key: image attribute by which results should be sorted
    :param sort_dir: direction in which results should be sorted (asc, desc)
    """
    filters = filters or {}
    session = get_session()
    query = session.query(models.Image).\
            options(joinedload(models.Image.properties)).\
            options(joinedload(models.Image.members))
    sort_dir_func = {
        'asc': asc,
        'desc': desc,
    }[sort_dir]
    sort_key_attr = getattr(models.Image, sort_key)
    # Secondary created_at/id keys make the ordering total, which the
    # marker-based pagination below relies on.
    query = query.order_by(sort_dir_func(sort_key_attr))\
                 .order_by(sort_dir_func(models.Image.created_at))\
                 .order_by(sort_dir_func(models.Image.id))
    if 'size_min' in filters:
        query = query.filter(models.Image.size >= filters['size_min'])
        del filters['size_min']
    if 'size_max' in filters:
        query = query.filter(models.Image.size <= filters['size_max'])
        del filters['size_max']
    if 'is_public' in filters and filters['is_public'] is not None:
        the_filter = [models.Image.is_public == filters['is_public']]
        if filters['is_public'] and context.owner is not None:
            # Owners and members also see their own non-public images.
            the_filter.extend([(models.Image.owner == context.owner),
                               models.Image.members.any(member=context.owner,
                                                        deleted=False)])
        if len(the_filter) > 1:
            query = query.filter(or_(*the_filter))
        else:
            query = query.filter(the_filter[0])
        del filters['is_public']
    showing_deleted = False
    if 'changes-since' in filters:
        changes_since = filters.pop('changes-since')
        query = query.filter(models.Image.updated_at > changes_since)
        showing_deleted = True
    if 'deleted' in filters:
        deleted_filter = filters.pop('deleted')
        query = query.filter_by(deleted=deleted_filter)
        showing_deleted = deleted_filter
        # TODO(bcwaldon): handle this logic in registry server
        if not deleted_filter:
            query = query.filter(models.Image.status != 'killed')
    for (k, v) in filters.pop('properties', {}).items():
        query = query.filter(models.Image.properties.any(name=k, value=v))
    for (k, v) in filters.items():
        if v is not None:
            query = query.filter(getattr(models.Image, k) == v)
    # FIX: compare against None with `is not` (PEP 8 / E711), not `!=`.
    if marker is not None:
        # images returned should be created before the image defined by marker
        marker_image = image_get(context, marker,
                                 force_show_deleted=showing_deleted)
        marker_value = getattr(marker_image, sort_key)
        if sort_dir == 'desc':
            query = query.filter(
                or_(sort_key_attr < marker_value,
                    and_(sort_key_attr == marker_value,
                         models.Image.created_at < marker_image.created_at,
                         models.Image.id < marker)))
        else:
            query = query.filter(
                or_(sort_key_attr > marker_value,
                    and_(sort_key_attr == marker_value,
                         models.Image.created_at > marker_image.created_at,
                         models.Image.id > marker)))
    if limit is not None:
        query = query.limit(limit)
    return query.all()
def _drop_protected_attrs(model_class, values):
"""
Removed protected attributes from values dictionary using the models
__protected_attributes__ field.
"""
for attr in model_class.__protected_attributes__:
if attr in values:
del values[attr]
def validate_image(values):
    """
    Validates the incoming data and raises a Invalid exception
    if anything is out of order.

    :param values: Mapping of image metadata to check
    :raises exception.Invalid: on any rule violation
    """
    # FIX: status was fetched twice in a row; a single lookup suffices.
    status = values.get('status')
    disk_format = values.get('disk_format')
    container_format = values.get('container_format')
    if not status:
        msg = "Image status is required."
        raise exception.Invalid(msg)
    if status not in STATUSES:
        msg = "Invalid image status '%s' for image." % status
        raise exception.Invalid(msg)
    if disk_format and disk_format not in DISK_FORMATS:
        msg = "Invalid disk format '%s' for image." % disk_format
        raise exception.Invalid(msg)
    if container_format and container_format not in CONTAINER_FORMATS:
        msg = "Invalid container format '%s' for image." % container_format
        raise exception.Invalid(msg)
    # Amazon-style formats must be used consistently for both fields.
    if disk_format in ('aki', 'ari', 'ami') or\
            container_format in ('aki', 'ari', 'ami'):
        if container_format != disk_format:
            # FIX: the message previously listed 'ami' twice instead of 'aki'.
            msg = ("Invalid mix of disk and container formats. "
                   "When setting a disk or container format to "
                   "one of 'ami', 'ari', or 'aki', the container "
                   "and disk formats must match.")
            raise exception.Invalid(msg)
def _image_update(context, values, image_id, purge_props=False):
    """
    Used internally by image_create and image_update

    :param context: Request context
    :param values: A dict of attributes to set
    :param image_id: If None, create the image, otherwise, find and update it
    :raises exception.Duplicate: saving collided with an existing image id
    :raises exception.Invalid: the resulting attributes fail validation
    """
    session = get_session()
    with session.begin():
        # Remove the properties passed in the values mapping. We
        # handle properties separately from base image attributes,
        # and leaving properties in the values mapping will cause
        # a SQLAlchemy model error because SQLAlchemy expects the
        # properties attribute of an Image model to be a list and
        # not a dict.
        properties = values.pop('properties', {})
        if image_id:
            image_ref = image_get(context, image_id, session=session)
            # Perform authorization check
            check_mutate_authorization(context, image_ref)
        else:
            # Creating a new image: normalize numeric/boolean attributes.
            if 'size' in values:
                values['size'] = int(values['size'])
            if 'min_ram' in values:
                values['min_ram'] = int(values['min_ram'] or 0)
            if 'min_disk' in values:
                values['min_disk'] = int(values['min_disk'] or 0)
            values['is_public'] = bool(values.get('is_public', False))
            values['protected'] = bool(values.get('protected', False))
            image_ref = models.Image()
        # Need to canonicalize ownership
        if 'owner' in values and not values['owner']:
            values['owner'] = None
        if image_id:
            # Don't drop created_at if we're passing it in...
            _drop_protected_attrs(models.Image, values)
        image_ref.update(values)
        # Validate the attributes before we go any further. From my
        # investigation, the @validates decorator does not validate
        # on new records, only on existing records, which is, well,
        # idiotic.
        validate_image(image_ref.to_dict())
        try:
            image_ref.save(session=session)
        except IntegrityError, e:
            raise exception.Duplicate("Image ID %s already exists!"
                                      % values['id'])
        _set_properties_for_image(context, image_ref, properties, purge_props,
                                  session)
    # Re-fetch so the returned image reflects the saved state.
    return image_get(context, image_ref.id)
def _set_properties_for_image(context, image_ref, properties,
                              purge_props=False, session=None):
    """
    Create or update a set of image_properties for a given image

    :param context: Request context
    :param image_ref: An Image object
    :param properties: A dict of properties to set
    :param purge_props: when True, soft-delete existing properties that are
                        absent from *properties*
    :param session: A SQLAlchemy session to use (if present)
    """
    orig_properties = {}
    for prop_ref in image_ref.properties:
        orig_properties[prop_ref.name] = prop_ref
    for name, value in properties.iteritems():
        prop_values = {'image_id': image_ref.id,
                       'name': name,
                       'value': value}
        if name in orig_properties:
            prop_ref = orig_properties[name]
            image_property_update(context, prop_ref, prop_values,
                                  session=session)
        else:
            image_property_create(context, prop_values, session=session)
    if purge_props:
        for key in orig_properties.keys():
            # FIX: `not key in properties` -> idiomatic `key not in`.
            if key not in properties:
                prop_ref = orig_properties[key]
                image_property_delete(context, prop_ref, session=session)
def image_property_create(context, values, session=None):
    """Create an ImageProperty object

    :param context: Request context
    :param values: Mapping of property attributes to set
    :param session: A SQLAlchemy session to use (if present)
    """
    prop_ref = models.ImageProperty()
    return _image_property_update(context, prop_ref, values, session=session)
def image_property_update(context, prop_ref, values, session=None):
    """Update an ImageProperty object

    :param prop_ref: Existing ImageProperty row to update
    """
    return _image_property_update(context, prop_ref, values, session=session)
def _image_property_update(context, prop_ref, values, session=None):
    """
    Used internally by image_property_create and image_property_update
    """
    # Drop protected attrs, then force the row live (revives a previously
    # soft-deleted property that is being re-set).
    _drop_protected_attrs(models.ImageProperty, values)
    values["deleted"] = False
    prop_ref.update(values)
    prop_ref.save(session=session)
    return prop_ref
def image_property_delete(context, prop_ref, session=None):
    """
    Soft-delete an ImageProperty: sets the 'deleted' flag, the row remains.
    (The previous docstring was a copy-paste from the update helpers.)
    """
    prop_ref.update(dict(deleted=True))
    prop_ref.save(session=session)
    return prop_ref
def image_member_create(context, values, session=None):
    """Create an ImageMember object

    :param values: Mapping of membership attributes to set
    """
    memb_ref = models.ImageMember()
    return _image_member_update(context, memb_ref, values, session=session)
def image_member_update(context, memb_ref, values, session=None):
    """Update an ImageMember object

    :param memb_ref: Existing ImageMember row to update
    """
    return _image_member_update(context, memb_ref, values, session=session)
def _image_member_update(context, memb_ref, values, session=None):
    """
    Used internally by image_member_create and image_member_update
    """
    _drop_protected_attrs(models.ImageMember, values)
    values["deleted"] = False
    # Membership does not grant re-sharing rights unless explicitly set.
    values.setdefault('can_share', False)
    memb_ref.update(values)
    memb_ref.save(session=session)
    return memb_ref
def image_member_delete(context, memb_ref, session=None):
    """Delete an ImageMember object

    Soft delete: flags the row as deleted rather than removing it.
    """
    session = session or get_session()
    memb_ref.update(dict(deleted=True))
    memb_ref.save(session=session)
    return memb_ref
def image_member_get(context, member_id, session=None):
    """Get an image member or raise if it does not exist.

    :raises NotFound: no membership with that id
    :raises NotAuthorized: the member's image is not visible to the context
    """
    session = session or get_session()
    try:
        query = session.query(models.ImageMember).\
                options(joinedload(models.ImageMember.image)).\
                filter_by(id=member_id)
        if not can_show_deleted(context):
            query = query.filter_by(deleted=False)
        member = query.one()
    except exc.NoResultFound:
        raise exception.NotFound("No membership found with ID %s" % member_id)
    # Make sure they can look at it
    if not context.is_image_visible(member.image):
        raise exception.NotAuthorized("Image not visible to you")
    return member
def image_member_find(context, image_id, member, session=None):
    """Find a membership association between image and member.

    :raises NotFound: no (image, member) association exists
    """
    session = session or get_session()
    try:
        # Note lack of permissions check; this function is called from
        # RequestContext.is_image_visible(), so avoid recursive calls
        query = session.query(models.ImageMember).\
                options(joinedload(models.ImageMember.image)).\
                filter_by(image_id=image_id).\
                filter_by(member=member)
        if not can_show_deleted(context):
            query = query.filter_by(deleted=False)
        return query.one()
    except exc.NoResultFound:
        raise exception.NotFound("No membership found for image %s member %s" %
                                 (image_id, member))
def image_member_get_memberships(context, member, marker=None, limit=None,
                                 sort_key='created_at', sort_dir='desc'):
    """
    Get all image memberships for the given member.

    :param member: the member to look up memberships for
    :param marker: membership id after which to start page
    :param limit: maximum number of memberships to return
    :param sort_key: membership attribute by which results should be sorted
    :param sort_dir: direction in which results should be sorted (asc, desc)
    """
    session = get_session()
    query = session.query(models.ImageMember).\
            options(joinedload(models.ImageMember.image)).\
            filter_by(member=member)
    if not can_show_deleted(context):
        query = query.filter_by(deleted=False)
    sort_dir_func = {
        'asc': asc,
        'desc': desc,
    }[sort_dir]
    sort_key_attr = getattr(models.ImageMember, sort_key)
    # Secondary id ordering keeps pagination stable when sort_key values tie.
    query = query.order_by(sort_dir_func(sort_key_attr)).\
            order_by(sort_dir_func(models.ImageMember.id))
    # FIX: compare against None with `is not` (PEP 8 / E711), not `!=`.
    if marker is not None:
        # memberships returned should be created before the membership
        # defined by marker
        marker_membership = image_member_get(context, marker)
        marker_value = getattr(marker_membership, sort_key)
        if sort_dir == 'desc':
            query = query.filter(
                or_(sort_key_attr < marker_value,
                    and_(sort_key_attr == marker_value,
                         models.ImageMember.id < marker)))
        else:
            query = query.filter(
                or_(sort_key_attr > marker_value,
                    and_(sort_key_attr == marker_value,
                         models.ImageMember.id > marker)))
    if limit is not None:
        query = query.limit(limit)
    return query.all()
# pylint: disable-msg=C0111
def can_show_deleted(context):
    """
    Decide whether deleted objects should be visible for this context.

    An explicit ``show_deleted`` attribute wins; otherwise, dict-like
    contexts are consulted for a ``deleted`` entry, defaulting to False.
    """
    sentinel = object()
    explicit = getattr(context, 'show_deleted', sentinel)
    if explicit is not sentinel:
        return explicit
    if hasattr(context, 'get'):
        return context.get('deleted', False)
    return False
| apache-2.0 |
msebire/intellij-community | python/lib/Lib/site-packages/django/utils/unittest/runner.py | 571 | 6761 | """Running tests"""
import sys
import time
import unittest
from django.utils.unittest import result
# registerResult hooks a result object into the signal-handling machinery
# (interrupt support); fall back to a no-op when the signals module is
# unavailable in this vendored copy.
try:
    from django.utils.unittest.signals import registerResult
except ImportError:
    def registerResult(_):
        pass
# Flag recognized by unittest: hides this module's frames from tracebacks.
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__()
        self.stream = stream
        self.descriptions = descriptions
        # verbosity > 1: one full line per test; verbosity == 1: dot mode
        self.showAll = verbosity > 1
        self.dots = verbosity == 1

    def getDescription(self, test):
        doc_first_line = test.shortDescription()
        if not (self.descriptions and doc_first_line):
            return str(test)
        return '\n'.join((str(test), doc_first_line))

    def _announce(self, verbose_text, dot):
        # Shared reporting used by the add* hooks: a labelled line in
        # verbose mode, a single progress character in dot mode.
        if self.showAll:
            self.stream.writeln(verbose_text)
        elif self.dots:
            self.stream.write(dot)
            self.stream.flush()

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        self._announce("ok", '.')

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        self._announce("ERROR", 'E')

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        self._announce("FAIL", 'F')

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        self._announce("skipped %r" % (reason,), "s")

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        self._announce("expected failure", "x")

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        self._announce("unexpected success", "u")

    def printErrors(self):
        if self.dots or self.showAll:
            self.stream.writeln()
        # Errors are reported before failures, matching unittest's output.
        for flavour, errors in (('ERROR', self.errors),
                                ('FAIL', self.failures)):
            self.printErrorList(flavour, errors)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)

    def stopTestRun(self):
        super(TextTestResult, self).stopTestRun()
        self.printErrors()
class TextTestRunner(unittest.TextTestRunner):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult

    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        if resultclass is not None:
            self.resultclass = resultclass

    def _makeResult(self):
        return self.resultclass(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        result.failfast = self.failfast
        result.buffer = self.buffer
        registerResult(result)

        started = time.time()
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            # Older result classes lack stopTestRun; fall back to printing
            # the collected errors directly.
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is None:
                result.printErrors()
            else:
                stopTestRun()
        elapsed = time.time() - started

        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        total = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (total, "s" if total != 1 else "", elapsed))
        self.stream.writeln()

        # These attributes only exist on results that support skips and
        # expected failures; default to zero otherwise.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            expectedFails, unexpectedSuccesses, skipped = (
                len(result.expectedFailures),
                len(result.unexpectedSuccesses),
                len(result.skipped))
        except AttributeError:
            pass

        infos = []
        if result.wasSuccessful():
            self.stream.write("OK")
        else:
            self.stream.write("FAILED")
            failed = len(result.failures)
            errored = len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
| apache-2.0 |
metnum/2013-2c-tp3 | informe/experimentos/exp_iteraciones.py | 1 | 43572 | #coding=UTF8
# Iteration-error measurements, keyed first by precision setting
# (90 / 95 / 99) and then by experiment variant label filled in below.
resultados = {90: {}, 95: {}, 99: {}}
# .90
resultados[90]['sin'] = [1.2586022145, 0.7671812416, 0.4686780482, 0.3109813673, 0.2243179286, 0.1734975739, 0.1388999921, 0.1130736304, 0.0931093471, 0.077780553, 0.0656376273, 0.0557321076, 0.047637242, 0.0407448348, 0.0350325007, 0.0301262333, 0.0260517623, 0.0224732582, 0.0195050308, 0.0168737721, 0.0147039698, 0.0127521434, 0.0111607192, 0.0096939316, 0.0085117294, 0.007395436, 0.0065148113, 0.0056580955, 0.0050011042, 0.0043360199, 0.0038460413, 0.003325886, 0.0029619249, 0.0025515126, 0.0022833042, 0.0019562146, 0.0017611441, 0.0014977689, 0.0013579789, 0.0011430725, 0.0010459273, 0.0008678853, 0.0008037283, 0.0006545684, 0.0006154666, 0.0004885018, 0.0004690273, 0.0003588374, 0.0003547168, 0.0002579069, 0.0002653148, 0.0001788552, 0.0001956697, 0.0001168107, 0.0001410654, 6.84E-005, 9.84E-005, 3.02E-005, 6.55E-005
]
resultados[90]['5ta'] = [1.2585999457, 0.7671797613, 0.4686709649, 0.3109749394, 0.2243068395, 0.1701782215, 0.1366320204, 0.1111415883, 0.0914089494, 0.0763306975, 0.0643632822, 0.0545934134, 0.0466178288, 0.0398406051, 0.0342205353, 0.029401107, 0.0253959686, 0.0218897383, 0.0189763378, 0.0164022089, 0.0142755821, 0.0123688281, 0.010810952, 0.0093818766, 0.0082256751, 0.0071402539, 0.0062798482, 0.0054484009, 0.0048069286, 0.0041632764, 0.0036846438, 0.0031825102, 0.0028272006, 0.0024315946, 0.0021696732, 0.0018555415, 0.0016643747, 0.0014121795, 0.0012752117, 0.0010696546, 0.0009740878, 0.0008046005, 0.0007405387, 0.0005990868, 0.0005596466, 0.0004392105, 0.0004188789, 0.0003149934, 0.0003089866, 0.0002180227, 0.0002235849, 0.0001421389, 0.0001568358, 8.30E-005, 0.0001046136, 3.67E-005, 6.45E-005
]
resultados[90]['10'] = [1.258586904, 0.7671797539, 0.4686336913, 0.3109526511, 0.2242530567, 0.1734490296, 0.1388176933, 0.1130188142, 0.0930180885, 0.0777035399, 0.0411199481, 0.0346033239, 0.029406619, 0.025084159, 0.0215304887, 0.0185125625, 0.0160091621, 0.0138335584, 0.0120139577, 0.0104216552, 0.003051018, 0.002571817, 0.0022161598, 0.0018861034, 0.0016294538, 0.0013811737, 0.0011933426, 0.0009969224, 0.0008644624, 0.0007145186, 0.0006168811, 0.0004979712, 0.0004241002, 0.0003255013, 0.0002744365, 0.0001949398, 0.0001554752, 9.42E-005, 6.55E-005
]
resultados[90]['5'] = [1.258580021, 0.7671651189, 0.4686068391, 0.310916755, 0.2242073652, 0.1700924402, 0.1365076611, 0.1110409689, 0.0912750169, 0.0762074218, 0.0386265409, 0.0323447133, 0.0273285027, 0.0232463341, 0.0199251286, 0.013580212, 0.0116908479, 0.0100787668, 0.0087154444, 0.0075434882, 0.0060847997, 0.0052444735, 0.0045551845, 0.0039428158, 0.0034280814, 0.002969147, 0.0025796874, 0.0022117528, 0.0019125613, 0.0016364484, 0.0014222241, 0.0011973188, 0.0010336672, 0.0008585423, 0.0007256612, 0.0005669732, 0.0004751708, 0.000364246, 0.0002987834, 0.000212505, 0.000173225, 0.0001070269, 6.81E-005
]
resultados[90]['4'] = [1.2585824996, 0.7671727324, 0.4686219149, 0.3109364477, 0.2190213152, 0.1696019223, 0.1360340122, 0.1109060732, 0.0675852221, 0.0563598637, 0.0473084359, 0.0401333153, 0.0337656175, 0.0288833754, 0.0248045272, 0.0213192404, 0.0097478623, 0.0083544856, 0.0071864933, 0.0062051067, 0.005452032, 0.0047213923, 0.0041124216, 0.0035688596, 0.0029772482, 0.0025683874, 0.0022399099, 0.0019208687, 0.0016924247, 0.0014509621, 0.0012544261, 0.0010558994, 0.0008827689, 0.0007335454, 0.0006210078, 0.0005084038, 0.0004475526, 0.0003458775, 0.0002911767, 0.0002106254, 0.0001512352, 9.40E-005, 6.36E-005
]
# .95
resultados[95]['sin'] = [1.3803665025, 0.9286457809, 0.6371578024, 0.4742344634, 0.3806845647, 0.3205528219, 0.2765409499, 0.2423913034, 0.2148269652, 0.1926639691, 0.1735891127, 0.1570938386, 0.1426456967, 0.1299277901, 0.1185063385, 0.1083478012, 0.0991773351, 0.0910026831, 0.0836180503, 0.0770160514, 0.0709352897, 0.0654879801, 0.0604493274, 0.0559250323, 0.0517044083, 0.0479082552, 0.0443477379, 0.0411523881, 0.0381315426, 0.0354350356, 0.0328732757, 0.0305949939, 0.0284083574, 0.0264683441, 0.0245899827, 0.0229342074, 0.02132253, 0.0199110985, 0.0185192028, 0.0173094987, 0.0161074503, 0.0150737039, 0.0140324738, 0.0131475003, 0.0122430857, 0.0114830808, 0.010693486, 0.0100404411, 0.0093510785, 0.0087902467, 0.0081854184, 0.0077028127, 0.0071702696, 0.0067546884, 0.0062856577, 0.0059308784, 0.0055170592, 0.0052122367, 0.0048446861, 0.0045825802, 0.0042552114, 0.0040307105, 0.003737969, 0.0035461745, 0.0032840122, 0.003120379, 0.0028849018, 0.0027463413, 0.002533904, 0.0024172563, 0.002225397, 0.0021278117, 0.001954108, 0.0018735802, 0.0017154149, 0.0016496656, 0.0015053736, 0.0014522687, 0.0013201835, 0.0012784778, 0.0011566369, 0.001124939, 0.0010125004, 0.0009892694, 0.0008850916, 0.0008696798, 0.0007723945, 0.0007638729, 0.0006730161, 0.000670197, 0.0005850046, 0.0005876137, 0.000507039, 0.0005144666, 0.0004383545, 0.0004496612, 0.0003774896, 0.0003926092, 0.0003235191, 0.0003420331, 0.0002760091, 0.0002970681, 0.0002337335, 0.0002574273, 0.0001960566, 0.0002221174, 0.0001628601, 0.0001906252, 0.0001332461, 0.0001629316, 0.0001067888, 0.0001382106, 8.35E-005, 0.0001161163, 6.28E-005, 9.69E-005, 4.41E-005, 7.97E-005, 2.78E-005, 6.44E-005, 1.32E-005, 5.14E-005
]
resultados[95]['5ta'] = [1.3803648603, 0.9286419031, 0.63715003, 0.4742247688, 0.3806724238, 0.3164265272, 0.2734476122, 0.2395581565, 0.2122237253, 0.1902733236, 0.1714334219, 0.155086687, 0.1407424925, 0.1281376519, 0.1168140264, 0.106748087, 0.0976635082, 0.0895708032, 0.0822608007, 0.0757322726, 0.0697214163, 0.0643368713, 0.0593590597, 0.0548916421, 0.0507253847, 0.0469804763, 0.0434679722, 0.0403167224, 0.0373410478, 0.0346814699, 0.032160128, 0.0299145139, 0.0277645932, 0.0258539605, 0.0240083529, 0.0223792658, 0.0207962488, 0.0194096799, 0.0180427481, 0.0168565736, 0.0156761571, 0.014663549, 0.0136415576, 0.0127753511, 0.0118883515, 0.0111453394, 0.0103712194, 0.0097332317, 0.0090575348, 0.0085104251, 0.0079179171, 0.0074478188, 0.0069263087, 0.0065218247, 0.0060621715, 0.0057171909, 0.0053122206, 0.0050165552, 0.0046569208, 0.0044030658, 0.0040827872, 0.003865736, 0.0035792933, 0.0033942859, 0.0031377357, 0.0029802162, 0.0027497737, 0.0026167495, 0.0024087057, 0.0022971757, 0.0021091263, 0.0020160206, 0.0018457337, 0.0017693464, 0.0016141684, 0.0015523111, 0.0014105947, 0.0013609805, 0.0012311962, 0.0011927455, 0.0010728845, 0.001044264, 0.0009334744, 0.0009130326, 0.0008103264, 0.0007974877, 0.0007014, 0.0006953463, 0.0006054807, 0.0006048992, 0.000520592, 0.0005252505, 0.0004453412, 0.0004547343, 0.0003790907, 0.0003921733, 0.0003204144, 0.0003372306, 0.0002683328, 0.0002885088, 0.0002225518, 0.0002452164, 0.0001819145, 0.0002071928, 0.0001456793, 0.000173362, 0.0001138399, 0.0001431618, 8.54E-005, 0.0001167047, 6.00E-005, 9.32E-005, 3.78E-005, 7.24E-005, 1.79E-005, 5.45E-005
]
resultados[95]['10'] = [1.3803741194, 0.9285758757, 0.6371022811, 0.4740981019, 0.3805836208, 0.3203898788, 0.2764167376, 0.2421929249, 0.2146558063, 0.1924296163, 0.1267977875, 0.1142523056, 0.1036518111, 0.0944418967, 0.0863469337, 0.0791937207, 0.0728263099, 0.0670205419, 0.0617993891, 0.0570143634, 0.0244339581, 0.0224600853, 0.0207527018, 0.0191925808, 0.0178085741, 0.0165234895, 0.0153653021, 0.0142638479, 0.0132888795, 0.0123644758, 0.011610564, 0.0107953271, 0.0100660247, 0.0093685912, 0.008748696, 0.0081471939, 0.0076087529, 0.0070888118, 0.0066221022, 0.0061602374, 0.0057079629, 0.0053120697, 0.0049623236, 0.0046163328, 0.0043120056, 0.0040019239, 0.0037426899, 0.0034708433, 0.0032379056, 0.0030028678, 0.0028062956, 0.0025906516, 0.0024186982, 0.002230815, 0.0020755522, 0.0019121133, 0.0017783564, 0.0016264172, 0.0015149436, 0.0013818765, 0.0012885784, 0.0011745178, 0.0010882093, 0.0009790379, 0.0009078481, 0.0008127842, 0.0007452642, 0.000663712, 0.000608512, 0.0005279476, 0.0004904272, 0.0004207093, 0.0003768604, 0.0003166181, 0.0002833383, 0.0002218617, 0.0001987494, 0.0001451058, 0.0001204552, 7.46E-005, 5.41E-005
]
resultados[95]['5'] = [1.380356845, 0.9285696889, 0.6370561407, 0.4740685954, 0.3805195381, 0.3162326592, 0.2732654523, 0.2393225699, 0.2119791527, 0.1899921004, 0.1211865826, 0.1089045454, 0.0985231598, 0.0897653274, 0.0820330684, 0.0648338142, 0.0595244898, 0.0547305069, 0.0504403056, 0.0466009541, 0.0410917521, 0.0380108052, 0.0351826882, 0.0326129747, 0.0302282785, 0.0281687539, 0.0261126741, 0.0242338958, 0.02249007, 0.0209015849, 0.0194877628, 0.0181233459, 0.0168310041, 0.0156574647, 0.0145543922, 0.0133123845, 0.0123721785, 0.0115309894, 0.0107139065, 0.0099797567, 0.0094551263, 0.0088092791, 0.0081822395, 0.007627966, 0.007086902, 0.0064721637, 0.006012905, 0.0056024009, 0.0051930415, 0.0048439525, 0.0045415377, 0.0042253514, 0.0039167335, 0.0036465053, 0.0033667524, 0.0031185525, 0.0028779566, 0.0026687301, 0.0024592308, 0.0022823097, 0.0021179893, 0.001966381, 0.00179817, 0.0016570933, 0.0015127957, 0.001375192, 0.001238142, 0.0011423552, 0.0010257694, 0.0009337053, 0.0008490567, 0.0007733783, 0.0006739905, 0.0006137119, 0.0005301854, 0.0004672469, 0.0003960866, 0.0003498304, 0.0002749761, 0.0002415722, 0.0001797636, 0.0001422383, 9.00E-005, 6.28E-005
]
resultados[95]['4'] = [1.3803694782, 0.9285563387, 0.6370745155, 0.4740608409, 0.3743791672, 0.3152538897, 0.2724495297, 0.2391865965, 0.1739513246, 0.1556572134, 0.139947552, 0.1267322492, 0.1142115873, 0.1040521657, 0.0949868097, 0.0869854725, 0.0516767543, 0.0473831997, 0.0435705202, 0.0402107813, 0.0375302736, 0.0347587569, 0.0322306621, 0.0298873714, 0.0270077073, 0.0250659368, 0.0232919243, 0.0216309227, 0.0203000943, 0.0188580777, 0.0175459261, 0.0163036492, 0.0148792473, 0.0138405568, 0.012891173, 0.0119874723, 0.0113504531, 0.0105543817, 0.0098304189, 0.0091442203, 0.0083547875, 0.0077642406, 0.0072430523, 0.0067316826, 0.0063245806, 0.005877311, 0.0054779167, 0.005079397, 0.0047689586, 0.0044228459, 0.0041164156, 0.0038160295, 0.0035288231, 0.0032579583, 0.0030331901, 0.0027998271, 0.0026090958, 0.0024045495, 0.0022317091, 0.0020425316, 0.0019193654, 0.0017552712, 0.0016179064, 0.0014767712, 0.0013463565, 0.0012126928, 0.0011178905, 0.0010049578, 0.0009229115, 0.0008246676, 0.0007511843, 0.0006540335, 0.0006061801, 0.0005231943, 0.0004630374, 0.0003931899, 0.0003392152, 0.0002666537, 0.0002333786, 0.0001726335, 0.0001393133, 8.81E-005, 6.13E-005
]
# .99
resultados[99]['sin'] = [1.5300150632, 1.1737828194, 0.9463858745, 0.8160111245, 0.7382764403, 0.6851999463, 0.6463060851, 0.6124955348, 0.5836247158, 0.557994168, 0.534923668, 0.5140026534, 0.4947596543, 0.4769856418, 0.4604484685, 0.444990178, 0.4303535115, 0.4165850954, 0.4035267053, 0.3911247122, 0.379287659, 0.3680140597, 0.3572024458, 0.3469179087, 0.3371795129, 0.3279742218, 0.3192803494, 0.3110080044, 0.3030526636, 0.2955013276, 0.2882328348, 0.2812628798, 0.2745256852, 0.2680199373, 0.2616962547, 0.2555838316, 0.2496621073, 0.2439520146, 0.2383859558, 0.2330124789, 0.2277785657, 0.2227320641, 0.2178039487, 0.2130385594, 0.2083909294, 0.2038947167, 0.1995000812, 0.1952529495, 0.1911110286, 0.1871230491, 0.1832220995, 0.1794544491, 0.1757578317, 0.1721849405, 0.1686829424, 0.1653200445, 0.162044772, 0.1588948526, 0.1558357186, 0.152892867, 0.1500103821, 0.1472148421, 0.1444670677, 0.141796518, 0.1391710621, 0.1366231945, 0.1341120888, 0.1316791782, 0.1292762285, 0.1269543968, 0.1246815292, 0.122502331, 0.1203590258, 0.1182862649, 0.1162332379, 0.1142454581, 0.1122907278, 0.1104087214, 0.1085452559, 0.1067423172, 0.1049508537, 0.1032179948, 0.1014981535, 0.0998366659, 0.0981842802, 0.0965882655, 0.0949984598, 0.0934670426, 0.0919396283, 0.0904663892, 0.0889971186, 0.0875798309, 0.0861635168, 0.0847978536, 0.083431966, 0.0821137533, 0.0807955175, 0.0795269409, 0.0782641557, 0.0770558238, 0.0758530927, 0.074700422, 0.0735434174, 0.072431828, 0.0713140935, 0.070240597, 0.0691598658, 0.0681252207, 0.0670834379, 0.0660853849, 0.0650801888, 0.06411731, 0.0631448138, 0.0622141698, 0.0612732032, 0.0603734747, 0.0594632165, 0.0585938632, 0.0577127595, 0.0568722698, 0.0560193827, 0.0552076028, 0.0543840979, 0.0536009166, 0.0528040601, 0.0520464664, 0.0512745707, 0.0505413394, 0.0497932955, 0.0490835822, 0.0483583789, 0.0476713996, 0.0469686721, 0.0463036428, 0.0456223521, 0.0449785198, 0.0443178146, 0.043694833, 0.0430544511, 0.042453215, 0.0418313874, 0.041251228, 
0.0406459441, 0.0400858251, 0.0394982447, 0.0389557844, 0.038385432, 0.0378601333, 0.0373063739, 0.0367982759, 0.0362620644, 0.0357710964, 0.0352512508, 0.0347757712, 0.0342709091, 0.0338103023, 0.0333201801, 0.032873779, 0.03239768, 0.0319650443, 0.0315024835, 0.0310832793, 0.0306339805, 0.0302283728, 0.0297927044, 0.0293997665, 0.0289763955, 0.0285958221, 0.028184429, 0.0278153083, 0.0274152678, 0.0270573449, 0.0266681763, 0.0263210989, 0.025942643, 0.0256059088, 0.0252378245, 0.0249115523, 0.0245536126, 0.0242371775, 0.0238889542, 0.0235819093, 0.0232430885, 0.0229453038, 0.0226154675, 0.0223266603, 0.0220056778, 0.0217254296, 0.021413018, 0.0211412205, 0.0208369778, 0.0205733892, 0.0202772331, 0.0200214259, 0.0197331028, 0.0194850249, 0.0192041277, 0.0189635761, 0.0186900571, 0.0184566194, 0.0181903231, 0.0179640735, 0.0177045848, 0.0174852236, 0.0172324689, 0.0170196133, 0.0167733895, 0.0165670587, 0.0163269973, 0.0161271593, 0.0158934983, 0.0156996384, 0.0154720247, 0.0152841562, 0.0150622535, 0.014880692, 0.0146651905, 0.0144894203, 0.0142795154, 0.0141091287, 0.0139043111, 0.0137391054, 0.0135394053, 0.0133790596, 0.0131843238, 0.0130288866, 0.0128388093, 0.0126881246, 0.0125027539, 0.0123565382, 0.0121757235, 0.0120340327, 0.0118574744, 0.0117201542, 0.0115479024, 0.0114146603, 0.0112465769, 0.011117481, 0.0109533121, 0.0108282716, 0.0106680888, 0.0105467937, 0.0103904596, 0.0102729726, 0.0101201906, 0.0100063942, 0.0098572529, 0.0097468639, 0.0096012491, 0.0094943808, 0.0093520312, 0.0092485635, 0.0091095654, 0.0090092215, 0.0088734701, 0.0087763543, 0.0086435793, 0.0085495892, 0.0084199084, 0.0083287743, 0.0082020924, 0.0081139428, 0.0079899918, 0.0079047886, 0.0077838575, 0.0077014831, 0.007583358, 0.0075037411, 0.0073880782, 0.0073111283, 0.0071980556, 0.007123519, 0.0070129545, 0.0069409653, 0.006832656, 0.0067631554, 0.006657359, 0.0065901699, 0.006486704, 0.0064218528, 0.0063204488, 0.0062578545, 0.0061586573, 0.0060980715, 0.0060010104, 
0.0059425733, 0.0058474103, 0.0057910601, 0.0056979598, 0.0056434461, 0.0055523319, 0.005499798, 0.0054104352, 0.0053598224, 0.0052724008, 0.0052235147, 0.0051380176, 0.0050914823, 0.0050083006, 0.0049638195, 0.0048824068, 0.0048394092, 0.0047596481, 0.0047182947, 0.004639952, 0.0046001883, 0.0045234324, 0.0044850268, 0.0044098082, 0.004372932, 0.004299012, 0.0042636171, 0.0041911669, 0.0041570091, 0.0040859842, 0.0040532391, 0.0039833967, 0.003952024, 0.0038835441, 0.003853301, 0.003786145, 0.0037572149, 0.0036911374, 0.0036634838, 0.0035986706, 0.0035720499, 0.0035084642, 0.0034830633, 0.0034204643, 0.0033962471, 0.0033348221, 0.0033115475, 0.0032512634, 0.0032291215, 0.0031697393, 0.0031486992, 0.0030904068, 0.0030702283, 0.0030129946, 0.0029938703, 0.0029374564, 0.0029193565, 0.0028639577, 0.0028466422, 0.002792229, 0.002775893, 0.002722229, 0.0027068434, 0.0026541249, 0.0026394573, 0.002587653, 0.0025739017, 0.0025227758, 0.0025099136, 0.0024596641, 0.0024474574, 0.00239806, 0.0023867045, 0.0023379247, 0.0023273962, 0.0022794341, 0.0022695006, 0.0022223314, 0.0022131937, 0.0021665829, 0.0021582172, 0.0021123657, 0.0021045412, 0.0020594267, 0.0020523459, 0.0020077396, 0.0020013769, 0.001957484, 0.0019516137, 0.0019084138, 0.0019032352, 0.0018604952, 0.0018559907, 0.0018139096, 0.00180985, 0.0017684074, 0.0017649971, 0.0017239665, 0.0017211845, 0.0016807726, 0.0016783876, 0.0016385754, 0.001636794, 0.0015973536, 0.0015961567, 0.0015572953, 0.001556457, 0.0015181549, 0.0015178819, 0.0014799123, 0.0014801874, 0.0014427577, 0.0014433531, 0.001406451, 0.0014075734, 0.0013709698, 0.0013726055, 0.0013365091, 0.0013384305, 0.0013028429, 0.0013052704, 0.0012699464, 0.0012728563, 0.0012380062, 0.001241167, 0.0012067788, 0.0012104036, 0.0011762473, 0.0011803232, 0.0011466127, 0.001150908, 0.0011176325, 0.001122363, 0.0010892911, 0.001094447, 0.0010617936, 0.0010671417, 0.0010348968, 0.0010406551, 0.0010085853, 0.001014744, 0.0009830687, 0.000989393, 0.0009581027, 
0.0009648116, 0.0009336729, 0.0009407573, 0.0009099909, 0.0009172184, 0.000886813, 0.0008944054, 0.0008641266, 0.0008720744, 0.0008421449, 0.0008502145, 0.0008206256, 0.0008290394, 0.0007995576, 0.0008083049, 0.0007791548, 0.0007880014, 0.0007591733, 0.0007683463, 0.0007396039, 0.0007490936, 0.0007206632, 0.0007302345, 0.0007021086, 0.0007119915, 0.0006839344, 0.0006941171, 0.0006663559, 0.0006766012, 0.000649128, 0.0006596651, 0.0006322428, 0.0006430661, 0.0006159223, 0.000626793, 0.0005999211, 0.000611071, 0.0005842317, 0.0005956551, 0.0005690859, 0.0005805427, 0.0005542336, 0.000565954, 0.000539663, 0.0005516399, 0.0005256027, 0.0005375969, 0.0005118053, 0.0005240531, 0.0004982639, 0.0005107581, 0.0004852082, 0.0004977066, 0.0004723903, 0.000485129, 0.0004598033, 0.0004727754, 0.000447679, 0.0004606409, 0.0004357678, 0.0004489605, 0.0004240647, 0.000437482, 0.0004128038, 0.000426201, 0.0004017338, 0.0004153527, 0.0003908529, 0.000404685, 0.0003803946, 0.0003941946, 0.000370107, 0.0003841175, 0.0003599878, 0.0003742039, 0.0003502734, 0.0003644489, 0.0003407114, 0.0003550903, 0.0003312989, 0.000345876, 0.0003222763, 0.0003368025, 0.0003133891, 0.0003281094, 0.0003046343, 0.0003195442, 0.0002962527, 0.0003111056, 0.0002879907, 0.0003030324, 0.0002798452, 0.0002950716, 0.0002720581, 0.0002872202, 0.0002643781, 0.0002797206, 0.0002567999, 0.0002723193, 0.0002495668, 0.0002650127, 0.0002424259, 0.0002580468, 0.0002353732, 0.0002511663, 0.0002286533, 0.0002443676, 0.0002220126, 0.0002378963, 0.0002154495, 0.0002314984, 0.0002092077, 0.0002251702, 0.0002030337, 0.0002191578, 0.000196924, 0.00021321, 0.0001911249, 0.0002073206, 0.000185383, 0.0002017368, 0.0001796941, 0.0001962059, 0.0001743079, 0.0001907243, 0.0001689689, 0.0001855384, 0.0001636727, 0.000180395, 0.0001586685, 0.000175292, 0.0001537024, 0.0001704761, 0.0001487697, 0.0001656945, 0.0001441199, 0.0001609426, 0.0001395016, 0.0001564695, 0.000134908, 0.0001520224, 0.0001305896, 0.0001475961, 0.0001262932, 
0.0001434429, 0.0001220135, 0.0001393079, 0.0001180015, 0.0001351861, 0.0001140037, 0.000131329, 0.000110017, 0.0001274832, 0.0001062911, 0.0001236431, 0.0001025726, 0.0001200609, 9.89E-005, 0.0001164847, 9.54E-005, 0.0001129077, 9.19E-005, 0.0001095832, 8.85E-005, 0.0001062574, 8.53E-005, 0.0001029254, 8.20E-005, 9.98E-005, 7.88E-005, 9.67E-005, 7.58E-005, 9.36E-005, 7.28E-005, 9.08E-005, 6.98E-005, 8.79E-005, 6.70E-005, 8.50E-005, 6.43E-005, 8.23E-005, 6.14E-005, 7.97E-005, 5.89E-005, 7.70E-005, 5.63E-005, 7.45E-005, 5.37E-005, 7.20E-005, 5.13E-005, 6.95E-005, 4.89E-005, 6.72E-005, 4.64E-005, 6.49E-005, 4.42E-005, 6.26E-005, 4.19E-005, 6.04E-005, 3.97E-005, 5.83E-005, 3.76E-005, 5.61E-005, 3.55E-005, 5.42E-005, 3.34E-005, 5.22E-005, 3.15E-005, 5.02E-005, 2.95E-005, 4.84E-005, 2.75E-005, 4.65E-005, 2.58E-005, 4.47E-005, 2.39E-005, 4.30E-005, 2.21E-005, 4.13E-005, 2.05E-005, 3.95E-005, 1.88E-005, 3.80E-005, 1.70E-005, 3.64E-005, 1.55E-005, 3.47E-005, 1.39E-005, 3.33E-005, 1.23E-005, 3.18E-005, 1.09E-005, 3.03E-005, 9.43E-006, 2.90E-005, 7.91E-006, 2.77E-005, 6.62E-006, 2.62E-005, 5.25E-006, 2.51E-005, 3.82E-006, 2.38E-005, 2.63E-006, 2.25E-005, 1.36E-006, 2.14E-005
]
resultados[99]['5ta'] = [1.5300245307, 1.1737697913, 0.9463942072, 0.8159962594, 0.7382839964, 0.6810101176, 0.6428216559, 0.6096383175, 0.5809622063, 0.5553323348, 0.5323132016, 0.5114211069, 0.4922376292, 0.474466932, 0.4579991442, 0.4425457005, 0.4279770821, 0.414210296, 0.4012128901, 0.3888127077, 0.3770349428, 0.3657639725, 0.3550092752, 0.3447247743, 0.3350364774, 0.325832259, 0.3171855003, 0.3089168468, 0.3010105066, 0.2934589435, 0.2862381654, 0.2792676484, 0.2725782315, 0.2660729026, 0.2597981465, 0.2536855922, 0.2478106902, 0.2420980843, 0.2365794029, 0.2312032821, 0.2260164898, 0.2209660116, 0.2160840098, 0.211314662, 0.206712694, 0.202212043, 0.1978628896, 0.1936097885, 0.1895155484, 0.1855191561, 0.1816663828, 0.1778899857, 0.1742406687, 0.1706582494, 0.1672030239, 0.1638290544, 0.1605967266, 0.1574366084, 0.1544192682, 0.1514658277, 0.1486252334, 0.145819871, 0.1431135385, 0.1404331446, 0.1378491398, 0.1352888431, 0.1328195544, 0.1303753304, 0.128013576, 0.1256801878, 0.1234488742, 0.1212563608, 0.1191539823, 0.1170678878, 0.1150560061, 0.1130544174, 0.1111397263, 0.1092440466, 0.1074206133, 0.1056036276, 0.1038520264, 0.1021042903, 0.1004235486, 0.09874689, 0.0971333809, 0.0955219649, 0.0939708777, 0.0924233279, 0.0909346951, 0.0894449861, 0.0880141059, 0.0865803913, 0.0852022297, 0.0838197796, 0.0824919568, 0.0811568453, 0.0798760872, 0.0785903124, 0.0773645297, 0.076138092, 0.0749725295, 0.0738020284, 0.0726821294, 0.0715527202, 0.0704719576, 0.0693801978, 0.0683363989, 0.0672831208, 0.0662778398, 0.0652609565, 0.0642919616, 0.0633099957, 0.0623737188, 0.0614238343, 0.0605187947, 0.0595997267, 0.0587253109, 0.0578361722, 0.0569910475, 0.0561304826, 0.0553133142, 0.0544812037, 0.0536931924, 0.0528894552, 0.0521281159, 0.0513498735, 0.0506131794, 0.0498592933, 0.049146356, 0.0484156468, 0.047725597, 0.047017394, 0.0463495837, 0.0456632591, 0.0450168531, 0.0443512761, 0.0437255318, 0.0430805011, 0.0424748087, 0.0418499262, 0.041264156, 0.040658267, 
0.0400909649, 0.0395031938, 0.0389535526, 0.038383278, 0.0378508124, 0.0372974249, 0.0367815526, 0.0362452121, 0.0357464814, 0.0352274048, 0.0347451203, 0.0342413177, 0.0337740898, 0.0332850958, 0.0328323074, 0.0323576437, 0.031918826, 0.0314576791, 0.0310324887, 0.0305846819, 0.0301725021, 0.0297381814, 0.029339651, 0.0289178656, 0.0285317001, 0.0281222176, 0.027747869, 0.0273499091, 0.0269868704, 0.02659986, 0.0262477593, 0.0258715516, 0.0255299126, 0.0251641359, 0.0248328068, 0.0244772044, 0.0241561035, 0.0238103476, 0.0234987132, 0.023162446, 0.0228601595, 0.0225329217, 0.0222397142, 0.0219213883, 0.0216368367, 0.0213271449, 0.0210515338, 0.0207496362, 0.0204831006, 0.020188554, 0.0199306403, 0.0196432157, 0.0193938075, 0.0191131339, 0.0188719669, 0.0185986798, 0.0183647183, 0.0180986159, 0.0178717905, 0.0176125912, 0.0173927285, 0.0171403261, 0.0169270076, 0.0166811661, 0.0164743824, 0.016234735, 0.0160343042, 0.0158009315, 0.0156067103, 0.0153794742, 0.0151912328, 0.0149697305, 0.0147873054, 0.0145717951, 0.0143954337, 0.0141860527, 0.0140153848, 0.0138111541, 0.0136457284, 0.0134466301, 0.0132860427, 0.0130919537, 0.0129362929, 0.0127468243, 0.0125959811, 0.0124112266, 0.0122648099, 0.0120846639, 0.011942785, 0.0117668685, 0.0116294242, 0.011457838, 0.0113244348, 0.0111570728, 0.0110278175, 0.0108643172, 0.0107391215, 0.0105796378, 0.0104581727, 0.0103026001, 0.0101849613, 0.0100329234, 0.0099190088, 0.0097706236, 0.0096600821, 0.0095152702, 0.009408233, 0.0092666511, 0.0091630575, 0.0090248515, 0.0089243464, 0.0087894331, 0.0086921543, 0.008560199, 0.0084660748, 0.0083372178, 0.0082459186, 0.0081200966, 0.0080317676, 0.0079086575, 0.0078232483, 0.0077029912, 0.0076202583, 0.0075029836, 0.0074231483, 0.0073084046, 0.007231243, 0.0071191166, 0.0070443066, 0.006934747, 0.0068624509, 0.0067551433, 0.0066853134, 0.0065804182, 0.0065127655, 0.0064103452, 0.0063451279, 0.0062447865, 0.0061818438, 0.00608372, 0.0060227461, 0.0059268019, 0.0058679613, 0.0057738949, 
0.005717151, 0.0056251343, 0.0055701991, 0.0054802127, 0.0054272555, 0.0053389897, 0.0052879745, 0.0052016052, 0.0051522473, 0.0050677733, 0.0050202874, 0.0049374552, 0.0048921696, 0.0048117683, 0.0047683909, 0.0046897456, 0.0046480158, 0.0045707583, 0.0045306518, 0.0044549612, 0.0044161924, 0.0043420531, 0.004304811, 0.0042319356, 0.0041962122, 0.0041247878, 0.0040902911, 0.004020308, 0.003987224, 0.0039183917, 0.0038867117, 0.0038192223, 0.0037886606, 0.0037225056, 0.0036932553, 0.0036281477, 0.0036002056, 0.0035363414, 0.0035094193, 0.0034467991, 0.0034210926, 0.0033594278, 0.0033349354, 0.0032744274, 0.0032508628, 0.0031915102, 0.003169077, 0.0031105927, 0.0030892923, 0.0030318797, 0.0030114273, 0.0029550876, 0.0029356894, 0.0028801369, 0.0028617942, 0.0028072352, 0.0027896643, 0.002736104, 0.0027195156, 0.0026666659, 0.0026510689, 0.0025991381, 0.002584245, 0.0025332404, 0.0025192652, 0.0024688999, 0.0024558502, 0.0024063438, 0.0023939299, 0.0023452889, 0.0023337311, 0.0022856694, 0.002274974, 0.0022277118, 0.0022175927, 0.0021711379, 0.0021618159, 0.0021158833, 0.0021073727, 0.0020621805, 0.0020541991, 0.0020097541, 0.0020025263, 0.0019585378, 0.0019520772, 0.001908771, 0.0019027917, 0.0018601796, 0.0018549102, 0.001812705, 0.0018081607, 0.0017665911, 0.001762482, 0.0017215568, 0.0017181118, 0.0016775487, 0.0016747743, 0.001634805, 0.0016324172, 0.001593055, 0.0015912852, 0.0015522462, 0.0015511057, 0.0015126225, 0.0015118261, 0.0014739102, 0.0014736935, 0.0014360613, 0.0014364336, 0.0013993251, 0.0014, 0.001363425, 0.0013646418, 0.0013283175, 0.0013300854, 0.0012942528, 0.0012962889, 0.0012609584, 0.0012635022, 0.00122839, 0.0012314531, 0.0011968069, 0.0012001162, 0.0011659522, 0.0011697358, 0.0011357605, 0.0011400316, 0.0011064903, 0.0011109602, 0.0010778677, 0.001082783, 0.0010498508, 0.0010552237, 0.0010227002, 0.0010282441, 0.0009961435, 0.0010021054, 0.0009701431, 0.0009765356, 0.0009449579, 0.0009514948, 0.0009203184, 0.0009272468, 0.0008961841, 
0.0009035219, 0.0008728205, 0.0008802766, 0.0008499568, 0.0008577807, 0.0008275512, 0.0008357626, 0.0008058764, 0.0008141811, 0.0007846579, 0.0007933083, 0.0007638563, 0.0007728737, 0.0007437451, 0.0007528379, 0.0007240532, 0.0007334719, 0.0007047384, 0.0007145068, 0.0006860766, 0.0006959, 0.0006677995, 0.000677929, 0.0006498614, 0.0006603236, 0.000632544, 0.0006430409, 0.000615576, 0.0006263665, 0.0005989181, 0.0006100264, 0.00058285, 0.0005939782, 0.0005670993, 0.0005785049, 0.0005516267, 0.0005633351, 0.0005367141, 0.0005484258, 0.0005220913, 0.0005340641, 0.0005077155, 0.0005199816, 0.0004938755, 0.000506133, 0.000480304, 0.000492813, 0.0004669528, 0.0004797435, 0.0004541142, 0.0004668793, 0.0004415129, 0.0004545144, 0.0004291062, 0.0004423779, 0.0004171888, 0.0004304249, 0.0004054873, 0.0004189484, 0.0003939552, 0.0004076773, 0.0003828904, 0.0003965651, 0.0003720227, 0.0003859104, 0.0003613019, 0.000375441, 0.0003510298, 0.0003651094, 0.0003409338, 0.0003552197, 0.000330966, 0.0003454948, 0.0003214287, 0.0003358892, 0.0003120482, 0.0003267068, 0.0003027801, 0.0003176728, 0.0002939255, 0.0003087405, 0.0002852111, 0.0003002149, 0.0002765908, 0.0002918237, 0.0002683698, 0.0002835171, 0.0002602726, 0.0002756029, 0.0002522531, 0.000267806, 0.0002446212, 0.0002600795, 0.0002370975, 0.0002527314, 0.0002296376, 0.0002454854, 0.0002225507, 0.0002382986, 0.0002155589, 0.0002314777, 0.0002086172, 0.0002247457, 0.0002020359, 0.0002180591, 0.0001955391, 0.0002117278, 0.0001890796, 0.0002054725, 0.0001829697, 0.0001992493, 0.0001769308, 0.0001933732, 0.0001709183, 0.0001875607, 0.0001652452, 0.0001817694, 0.0001596308, 0.0001763139, 0.0001540348, 0.0001709118, 0.0001487683, 0.0001655204, 0.0001435504, 0.000160455, 0.0001383393, 0.0001554362, 0.0001334497, 0.0001504175, 0.0001285991, 0.0001457182, 0.0001237445, 0.0001410545, 0.0001192063, 0.0001363829, 0.0001146981, 0.0001320216, 0.0001101768, 0.0001276871, 0.000105963, 0.0001233384, 0.0001017717, 0.0001192926, 9.76E-005, 
0.0001152659, 9.36E-005, 0.0001112157, 8.98E-005, 0.0001074627, 8.58E-005, 0.0001037219, 8.22E-005, 9.99E-005, 7.86E-005, 9.65E-005, 7.49E-005, 9.30E-005, 7.15E-005, 8.95E-005, 6.82E-005, 8.63E-005, 6.48E-005, 8.30E-005, 6.16E-005, 7.98E-005, 5.85E-005, 7.68E-005, 5.53E-005, 7.38E-005, 5.24E-005, 7.07E-005, 4.95E-005, 6.79E-005, 4.65E-005, 6.52E-005, 4.39E-005, 6.23E-005, 4.11E-005, 5.98E-005, 3.84E-005, 5.72E-005, 3.59E-005, 5.45E-005, 3.34E-005, 5.22E-005, 3.08E-005, 4.98E-005, 2.85E-005, 4.74E-005, 2.61E-005, 4.52E-005, 2.37E-005, 4.30E-005, 2.16E-005, 4.07E-005, 1.94E-005, 3.87E-005, 1.71E-005, 3.67E-005, 1.52E-005, 3.45E-005, 1.31E-005, 3.27E-005, 1.10E-005, 3.08E-005, 9.19E-006, 2.89E-005, 7.29E-006, 2.72E-005, 5.31E-006, 2.55E-005, 3.64E-006, 2.36E-005, 1.87E-006, 2.22E-005
]
resultados[99]['10'] = [1.5298740429, 1.1733324852, 0.9457552419, 0.8151871284, 0.7373365958, 0.6841674432, 0.6452276873, 0.6113582785, 0.5824532745, 0.556748045, 0.4671704876, 0.4485764998, 0.4325492947, 0.4179855109, 0.4044466732, 0.3916821084, 0.3795610443, 0.3680180267, 0.3570100468, 0.3464375175, 0.2429287503, 0.2366938027, 0.2307144555, 0.2250792859, 0.2196975405, 0.2145454917, 0.2095840828, 0.2047902923, 0.2001839272, 0.1957365076, 0.1921932223, 0.1879849286, 0.183940333, 0.18004663, 0.1762568516, 0.1725808602, 0.1690231639, 0.1655638743, 0.1621983293, 0.158947759, 0.1553429238, 0.1523033771, 0.1493619434, 0.1464864244, 0.1436908377, 0.1409754435, 0.138301951, 0.1356852221, 0.1331400361, 0.1306456744, 0.1282962326, 0.1259133256, 0.1235771182, 0.1212948058, 0.1190808433, 0.1169228785, 0.1148273828, 0.1127829008, 0.1107744594, 0.1088065685, 0.1070775477, 0.1051993453, 0.1033727171, 0.101590474, 0.0998351464, 0.0981187057, 0.0964408824, 0.0947908751, 0.0931726394, 0.0915862819, 0.0902079378, 0.0886748297, 0.0871768835, 0.085701171, 0.0842578075, 0.0828482568, 0.081453056, 0.080091541, 0.0787595509, 0.0774449312, 0.076137518, 0.0748752326, 0.0736264012, 0.0724042241, 0.0712038482, 0.0700204838, 0.0688723964, 0.0677565453, 0.0666551057, 0.065581601, 0.0646332858, 0.063591715, 0.0625696141, 0.0615733216, 0.06057912, 0.0596024255, 0.0586401234, 0.0576941099, 0.0567637286, 0.0558548491, 0.0546448993, 0.0537723848, 0.0529216741, 0.0520774658, 0.0512468993, 0.0504397572, 0.049635377, 0.0488481316, 0.0480799031, 0.0473151366, 0.0464471105, 0.0457171489, 0.0449902685, 0.0442754759, 0.0435815749, 0.0428924681, 0.0422119649, 0.0415521788, 0.040893457, 0.0402451781, 0.03979885, 0.0391706747, 0.0385496368, 0.0379484405, 0.0373467912, 0.0367529998, 0.0361768668, 0.0356039856, 0.0350378657, 0.0344911426, 0.0337258542, 0.0331869841, 0.0326651492, 0.0321472231, 0.0316321053, 0.0311376159, 0.0306443193, 0.0301535867, 0.0296785203, 0.0292062165, 0.0287876588, 0.0283350441, 
0.027884399, 0.0274369568, 0.0270042931, 0.0265733884, 0.0261417858, 0.0257282018, 0.0253153454, 0.0249061877, 0.0245896229, 0.0241962747, 0.0238017112, 0.0234238879, 0.0230452313, 0.0226692214, 0.0223090178, 0.0219506541, 0.021590486, 0.0212462486, 0.0207835664, 0.0204395731, 0.0201112011, 0.0197854965, 0.0194574769, 0.0191452369, 0.0188304282, 0.0185149936, 0.0182136072, 0.0179141013, 0.0176589339, 0.0173738934, 0.017085969, 0.0167972268, 0.0165212708, 0.0162455009, 0.0159677382, 0.0157071837, 0.0154443459, 0.0151799084, 0.0149726584, 0.0147188855, 0.0144619218, 0.0142213191, 0.0139790294, 0.013736462, 0.0135057748, 0.0132737311, 0.0130373291, 0.0128160141, 0.0125103118, 0.0122879302, 0.0120782202, 0.0118676622, 0.0116510691, 0.0114490422, 0.0112443975, 0.0110383226, 0.0108460449, 0.0106536334, 0.0104954623, 0.0103105537, 0.0101215092, 0.0099300632, 0.0097518437, 0.0095741151, 0.0093915808, 0.0092228036, 0.0090495863, 0.008873175, 0.0087298791, 0.0085648367, 0.0083959835, 0.0082419637, 0.0080830398, 0.007920976, 0.0077697476, 0.0076168502, 0.0074597049, 0.0073183409, 0.0071263112, 0.0069778547, 0.0068398151, 0.0067005423, 0.0065548146, 0.0064243515, 0.0062917609, 0.006155729, 0.0060299476, 0.0059023388, 0.005794415, 0.0056733697, 0.0055490631, 0.0054230467, 0.0053084732, 0.0051914039, 0.0050675442, 0.0049569497, 0.0048414065, 0.0047234576, 0.0046329655, 0.0045260462, 0.0044118458, 0.0043112599, 0.0042049278, 0.004094679, 0.0039961839, 0.0038976895, 0.0037929236, 0.0037011932, 0.0035714577, 0.0034701937, 0.00337925, 0.0032881274, 0.0031915277, 0.0031086374, 0.0030204386, 0.0029273598, 0.0028441371, 0.0027591599, 0.0026862614, 0.0026102855, 0.0025293734, 0.0024437021, 0.0023681788, 0.0022897823, 0.0022037408, 0.0021322307, 0.0020575985, 0.0019789982, 0.0019196689, 0.0018480131, 0.0017684125, 0.0017023213, 0.001630941, 0.00155793, 0.0014958318, 0.0014305917, 0.0013571124, 0.0012972226, 0.0012108216, 0.0011405492, 0.0010834523, 0.0010259359, 0.0009573039, 
0.0009031126, 0.0008438773, 0.0007775439, 0.0007234515, 0.0006708813, 0.0006186519, 0.0005710673, 0.0005158273, 0.0004548133, 0.0004050201, 0.0003537908, 0.0002936333, 0.0002528, 0.0002009702, 0.0001460181, 0.0001075033, 6.19E-005
]
resultados[99]['5'] = [1.5298978805, 1.1733943382, 0.945850437, 0.8153072829, 0.737482297, 0.6801411068, 0.6418958736, 0.6086781226, 0.579959708, 0.5542827581, 0.4589047036, 0.4400404454, 0.4238155935, 0.4095451569, 0.3961957588, 0.3605092245, 0.3496197341, 0.3391624522, 0.3295966692, 0.3205689159, 0.3068779655, 0.298863858, 0.2912304931, 0.2839149173, 0.2769218609, 0.2707922987, 0.2642360479, 0.2578890929, 0.251732455, 0.245787083, 0.2403900445, 0.2347960126, 0.2293892855, 0.2241348724, 0.2190431922, 0.2129181026, 0.2081743049, 0.2035596513, 0.1990649909, 0.1947070347, 0.1917550645, 0.1875829196, 0.1835452892, 0.1796237143, 0.1758137054, 0.1709632141, 0.1673953748, 0.1639087033, 0.1605277121, 0.1572476701, 0.1546726308, 0.1515702923, 0.1485650323, 0.145649451, 0.1428378057, 0.1399499295, 0.1372737067, 0.1346603651, 0.1321116007, 0.1296057185, 0.1275977565, 0.1252018562, 0.12285695, 0.1205696061, 0.1183386826, 0.1157508418, 0.113662834, 0.1116245875, 0.1096314099, 0.107679936, 0.1061648868, 0.1042785653, 0.1024513449, 0.1006731308, 0.0989288577, 0.0971679723, 0.0955007099, 0.0938559629, 0.0922540178, 0.0906841832, 0.0891852994, 0.0876701025, 0.0861892066, 0.0847250476, 0.0832996193, 0.081947668, 0.0805703681, 0.0792236999, 0.0779049385, 0.0766022938, 0.0755396217, 0.0742877396, 0.0730518913, 0.0718433798, 0.0706579157, 0.0692280857, 0.0680887275, 0.0669742482, 0.0658804468, 0.0648193487, 0.0635639304, 0.0625397478, 0.0615392061, 0.0605583121, 0.0595870617, 0.0588082216, 0.0578768094, 0.0569530336, 0.05604995, 0.0551641648, 0.0539869623, 0.0531343551, 0.0522999891, 0.051469942, 0.0506597891, 0.049983368, 0.0491911199, 0.0484166349, 0.0476583165, 0.0469046095, 0.0461534109, 0.0454321072, 0.0447144832, 0.0440121095, 0.0433255688, 0.0426297437, 0.04196426, 0.0413123889, 0.0406630388, 0.040028215, 0.0393647509, 0.0387453922, 0.0381407348, 0.0375490507, 0.0369587334, 0.0364668888, 0.035901049, 0.03533697, 0.0347826153, 0.0342427652, 0.033656945, 0.0331300717, 
0.0326164577, 0.0321028937, 0.0315990649, 0.0310648886, 0.0305759428, 0.0300958946, 0.0296288126, 0.0291620744, 0.0287946001, 0.0283464644, 0.0278996473, 0.0274589655, 0.027031983, 0.0265301521, 0.0261108501, 0.0257039909, 0.0252966529, 0.0248952513, 0.0245191558, 0.0241310414, 0.0237489168, 0.0233786956, 0.0230074223, 0.0226587348, 0.0223039979, 0.0219493889, 0.0215984887, 0.0212602993, 0.020950175, 0.0206136086, 0.0202886339, 0.0199631868, 0.0196410753, 0.0192362755, 0.0189274348, 0.0186205741, 0.0183258922, 0.0180288531, 0.017820906, 0.0175377153, 0.0172540791, 0.0169723137, 0.0167020961, 0.0163495979, 0.0160791617, 0.0158204525, 0.0155611139, 0.0153032298, 0.0150874926, 0.0148385767, 0.0145908402, 0.0143534253, 0.0141140189, 0.0138565072, 0.0136310661, 0.0134040568, 0.0131763999, 0.0129596918, 0.0127868533, 0.0125666638, 0.0123583541, 0.012148642, 0.0119393078, 0.0116417632, 0.0114414898, 0.0112399575, 0.0110499774, 0.0108575334, 0.010730327, 0.0105488892, 0.010364992, 0.0101792848, 0.0100037349, 0.0097599308, 0.0095826361, 0.009417124, 0.0092495202, 0.0090797695, 0.0089508637, 0.0087871986, 0.008622626, 0.0084690934, 0.0083142844, 0.0081256239, 0.0079802237, 0.0078308944, 0.0076795699, 0.0075378894, 0.0074371598, 0.0072923791, 0.0071586301, 0.0070220484, 0.0068823644, 0.0066719437, 0.0065399705, 0.0064067568, 0.0062860262, 0.0061606737, 0.006077316, 0.0059587447, 0.0058373468, 0.0057124578, 0.0055990927, 0.0054415796, 0.0053250781, 0.0052176706, 0.0051070224, 0.0049922203, 0.0049104543, 0.0048026676, 0.0046951033, 0.0045969054, 0.0044955751, 0.0043640294, 0.0042687448, 0.0041693746, 0.0040689859, 0.0039792969, 0.0039165076, 0.0038197618, 0.0037323843, 0.0036413329, 0.0035467318, 0.0034114193, 0.003326788, 0.0032383134, 0.0031602802, 0.0030763081, 0.0030163615, 0.0029382176, 0.0028580517, 0.002775685, 0.0027042136, 0.0026077867, 0.0025281328, 0.0024567823, 0.0023825939, 0.0023048726, 0.0022517193, 0.0021813537, 0.0021087371, 0.0020440012, 0.0019753353, 
0.0018891624, 0.0018277994, 0.0017635994, 0.0016967805, 0.0016384169, 0.0015938731, 0.0015263305, 0.0014690111, 0.0014073789, 0.0013449841, 0.0012661768, 0.0012084052, 0.0011466579, 0.0010959332, 0.001037436, 0.000993186, 0.000945278, 0.000893161, 0.0008361958, 0.000790091, 0.0007288789, 0.0006730335, 0.0006274661, 0.0005797931, 0.0005269773, 0.000491612, 0.0004422915, 0.0003909116, 0.0003489903, 0.0003025389, 0.0002479306, 0.0002123569, 0.0001677473, 0.0001206035, 8.19E-005, 5.12E-005
]
resultados[99]['4'] = [1.5298774495, 1.1733501668, 0.945777937, 0.8152128316, 0.7329432346, 0.6803458136, 0.6418556883, 0.6082648719, 0.5311527039, 0.5080000919, 0.4875336499, 0.4688923528, 0.4509683894, 0.4356768606, 0.4212387352, 0.4076527573, 0.3270315488, 0.3174406098, 0.3088273659, 0.3006672067, 0.294203483, 0.2867777314, 0.2797099663, 0.2729010516, 0.2635056493, 0.2572259178, 0.2511409256, 0.2452465484, 0.240544552, 0.2349758666, 0.2295553527, 0.2243011465, 0.2177147968, 0.2128221788, 0.2080559432, 0.2034617686, 0.2000616463, 0.1956735273, 0.1914025286, 0.1872509874, 0.1821325943, 0.1782845642, 0.1745331411, 0.1708670634, 0.1677402659, 0.1642622454, 0.1608759621, 0.1575861558, 0.1547033656, 0.1516024266, 0.1485962265, 0.1456797908, 0.1426295777, 0.1398989561, 0.1372253628, 0.1346056829, 0.1322093553, 0.1297031348, 0.127251666, 0.1248620837, 0.1228868275, 0.1205910444, 0.1183560939, 0.1161651818, 0.1137772561, 0.1117354106, 0.1097365777, 0.1077769007, 0.1060321888, 0.1041475782, 0.102306292, 0.1005270321, 0.0990364346, 0.0973198454, 0.0956465549, 0.0939984743, 0.0921728557, 0.0906020072, 0.0890534521, 0.0875345341, 0.086170482, 0.0847049257, 0.0832669424, 0.081868336, 0.0806835508, 0.0793238024, 0.0779998923, 0.0766945171, 0.0752305647, 0.0739834225, 0.0727488563, 0.0715356886, 0.0704470388, 0.0692779431, 0.0681287043, 0.0670141378, 0.0660794001, 0.0650017193, 0.0639506577, 0.0629137383, 0.0607938043, 0.0598242704, 0.0588619086, 0.0579150436, 0.0573961686, 0.0564784413, 0.0555709809, 0.0546909612, 0.0533646069, 0.0525122985, 0.0516829969, 0.0508614074, 0.050100819, 0.0493108258, 0.0485283318, 0.0477547127, 0.0470492757, 0.0463036668, 0.0455643189, 0.0448483174, 0.0442158979, 0.0435112377, 0.0428292579, 0.0421531401, 0.0411104055, 0.0404677209, 0.0398283561, 0.0391954614, 0.0388610101, 0.0382488853, 0.0376417604, 0.0370539506, 0.0360815378, 0.0355056329, 0.0349499129, 0.0343981718, 0.0339360138, 0.0334060394, 0.0328786348, 0.0323528977, 0.0318954562, 
0.0313894954, 0.0308875198, 0.0304046388, 0.02991761, 0.0294382236, 0.0289748247, 0.0285138553, 0.0279186523, 0.027480062, 0.0270450748, 0.0266102971, 0.0263617706, 0.0259405566, 0.0255198149, 0.025116137, 0.0244775998, 0.0240818925, 0.0237017687, 0.0233234708, 0.0230010601, 0.022634655, 0.0222707312, 0.0219069317, 0.0216106738, 0.0212634495, 0.0209149204, 0.0205810133, 0.020210735, 0.0198761686, 0.0195568035, 0.0192413382, 0.018876059, 0.0185716838, 0.0182679077, 0.0179614495, 0.01777303, 0.0174807568, 0.0171884764, 0.0169111245, 0.0165026024, 0.0162227606, 0.0159552652, 0.0156902316, 0.0154498259, 0.0151969991, 0.0149444664, 0.0146878214, 0.0144932932, 0.0142472554, 0.0139989353, 0.0137663806, 0.0134920712, 0.0132573639, 0.0130338349, 0.0128105276, 0.0125638178, 0.0123496208, 0.012135997, 0.0119206945, 0.0117833181, 0.0115774714, 0.0113674679, 0.0111706568, 0.0109005436, 0.0107010001, 0.0105147932, 0.0103289066, 0.010143072, 0.009963564, 0.009781443, 0.0095962449, 0.0094678302, 0.0092951461, 0.0091185765, 0.0089551059, 0.0087527195, 0.0085816021, 0.0084221166, 0.0082636767, 0.0080897469, 0.0079409654, 0.0077881811, 0.007631312, 0.0075309372, 0.0073820815, 0.0072303548, 0.0070940252, 0.0069150227, 0.0067708848, 0.006637708, 0.0065023545, 0.0063560365, 0.0062300895, 0.0061021361, 0.0059702852, 0.0058837405, 0.0057590737, 0.0056284074, 0.0055113474, 0.0053671068, 0.0052455256, 0.0051350652, 0.0050220176, 0.0048905031, 0.0047834904, 0.0046720163, 0.0045582803, 0.0044892684, 0.0043852181, 0.0042751344, 0.0041782038, 0.0040539786, 0.0039471801, 0.0038527419, 0.0037582527, 0.0036469081, 0.003558808, 0.0034654173, 0.0033668873, 0.0033059548, 0.0032160451, 0.0031225518, 0.0030428897, 0.0029432454, 0.0028530285, 0.0027727002, 0.0026896145, 0.0025884775, 0.0025159602, 0.0024379785, 0.0023549172, 0.002305484, 0.0022286291, 0.002145257, 0.0020764708, 0.0019940464, 0.0019178844, 0.0018520247, 0.0017829572, 0.0016930618, 0.0016294289, 0.0015620166, 0.001491043, 0.0014505602, 
0.0013874419, 0.0013160594, 0.0012585675, 0.0011882385, 0.0011197102, 0.0010655259, 0.0010089524, 0.0009301993, 0.0008790962, 0.0008207844, 0.0007560993, 0.0007204776, 0.0006685627, 0.0006069491, 0.000561559, 0.0005041677, 0.0004448396, 0.00039694, 0.0003470595, 0.0002772744, 0.0002383885, 0.0001885961, 0.0001354179, 0.0001045401, 6.05E-005
]
import itertools
from pylab import plt, legend


def _plot_comparison(fignum, series):
    """Plot the convergence curves for one percentile's result series.

    fignum -- matplotlib figure number to draw into
    series -- dict mapping strategy key ('sin', '5ta', '10', '5', '4')
              to a list of distance values, one per iteration
    """
    # Fresh marker cycle per figure so every figure starts with '+',
    # matching the original per-figure reset.
    marker = itertools.cycle(('+', 'o', '*'))
    plt.figure(fignum)
    plt.xlabel(u'iteraciones')
    plt.ylabel('distancia al resultado obtenido')
    for key, label in (('sin', u"Sin EQ"),
                       ('5ta', u"EQ en 5ta iteración"),
                       ('10', u"EQ cada 10 iteraciones"),
                       ('5', u"EQ cada 5 iteraciones"),
                       ('4', u"EQ cada 4 iteraciones")):
        values = series[key]
        # next(marker) works on Python 2.6+ and 3.x; marker.next() was
        # Python-2-only and breaks under Python 3.
        plt.plot(range(len(values)), values, marker=next(marker), label=label)
    legend()


# One figure per percentile, numbered 1..3 exactly as before.
for fignum, percentile in enumerate((90, 95, 99), start=1):
    _plot_comparison(fignum, resultados[percentile])
plt.show()
| mit |
bright-sparks/chromium-spacewalk | tools/usb_gadget/keyboard_gadget_test.py | 54 | 2183 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import hid_constants
import keyboard_gadget
import usb_constants
class KeyboardGadgetTest(unittest.TestCase):
  """Unit tests for keyboard_gadget.KeyboardGadget.

  The 8-byte HID boot-protocol keyboard input report layout is:
  byte 0 = modifier bitmask, byte 1 = reserved, bytes 2-7 = key usage IDs.
  """

  def test_key_press(self):
    """A key press/release updates both the interrupt IN endpoint (via
    chip.SendPacket) and the GET_REPORT control-transfer response."""
    g = keyboard_gadget.KeyboardGadget()
    chip = mock.Mock()
    g.Connected(chip, usb_constants.Speed.FULL)
    g.KeyDown(0x04)  # usage 0x04 is the 'A' key
    # bmRequestType 0xA1, bRequest 1 = GET_REPORT; wValue 0x0100 = input report.
    self.assertEqual(g.ControlRead(0xA1, 1, 0x0100, 0, 8),
                     '\x00\x00\x04\x00\x00\x00\x00\x00')
    g.KeyUp(0x04)
    self.assertEqual(g.ControlRead(0xA1, 1, 0x0100, 0, 8),
                     '\x00\x00\x00\x00\x00\x00\x00\x00')
    # Each state change is also pushed on interrupt IN endpoint 0x81.
    chip.SendPacket.assert_has_calls([
        mock.call(0x81, '\x00\x00\x04\x00\x00\x00\x00\x00'),
        mock.call(0x81, '\x00\x00\x00\x00\x00\x00\x00\x00'),
    ])

  def test_key_press_with_modifier(self):
    """Modifier state (bit 1 = left shift) is held in byte 0 of every
    report while regular keys come and go in the key-array bytes."""
    g = keyboard_gadget.KeyboardGadget()
    chip = mock.Mock()
    g.Connected(chip, usb_constants.Speed.FULL)
    g.ModifierDown(hid_constants.ModifierKey.L_SHIFT)
    g.KeyDown(0x04)
    g.KeyDown(0x05)
    g.KeyUp(0x04)
    g.KeyUp(0x05)
    g.ModifierUp(hid_constants.ModifierKey.L_SHIFT)
    # One interrupt report per state change, in order.
    chip.SendPacket.assert_has_calls([
        mock.call(0x81, '\x02\x00\x00\x00\x00\x00\x00\x00'),
        mock.call(0x81, '\x02\x00\x04\x00\x00\x00\x00\x00'),
        mock.call(0x81, '\x02\x00\x04\x05\x00\x00\x00\x00'),
        mock.call(0x81, '\x02\x00\x00\x05\x00\x00\x00\x00'),
        mock.call(0x81, '\x02\x00\x00\x00\x00\x00\x00\x00'),
        mock.call(0x81, '\x00\x00\x00\x00\x00\x00\x00\x00'),
    ])

  def test_set_leds(self):
    """SET_REPORT (bRequest 9, wValue 0x02xx = output report) and interrupt
    OUT packets both update the LED state reported by GET_REPORT."""
    g = keyboard_gadget.KeyboardGadget()
    chip = mock.Mock()
    g.Connected(chip, usb_constants.Speed.FULL)
    self.assertEqual(g.ControlRead(0xA1, 1, 0x0200, 0, 8), '\x00')
    self.assertTrue(g.ControlWrite(0x21, 9, 0x0200, 0, '\x01'))
    self.assertEqual(g.ControlRead(0xA1, 1, 0x0200, 0, 8), '\x01')
    # LED reports may also arrive on interrupt OUT endpoint 0x01; a valid
    # packet must not halt the endpoint.
    g.ReceivePacket(0x01, '\x03')
    self.assertFalse(chip.HaltEndpoint.called)
    self.assertEqual(g.ControlRead(0xA1, 1, 0x0200, 0, 8), '\x03')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Mapotempo/mapotempo-qgis-plugin | SwaggerMapo/models/v01_route.py | 1 | 3333 | #!/usr/bin/env python
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V01Route(object):
    """Swagger-generated model for a Mapotempo V01 route resource.

    Instances start with every data attribute set to None; the swagger
    client fills them in by deserializing an API response, guided by
    ``swagger_types`` (attribute name -> type) and ``attribute_map``
    (attribute name -> JSON key).
    """

    def __init__(self):
        """Initialize all model attributes to None and record the
        swagger type / JSON-key metadata used by the deserializer."""
        # Attribute name -> swagger type of the attribute.
        self.swagger_types = {
            'id': 'int',
            'distance': 'float',
            'emission': 'float',
            'vehicle_id': 'int',
            'start': 'datetime',
            'end': 'datetime',
            'hidden': 'bool',
            'locked': 'bool',
            'out_of_date': 'bool',
            'stops': 'list[V01Stop]',
            'stop_trace': 'str',
            'stop_out_of_drive_time': 'bool',
            'stop_distance': 'float',
            'ref': 'str',
            'color': 'str'
        }

        # Attribute name -> key used in the JSON representation.
        self.attribute_map = {
            'id': 'id',
            'distance': 'distance',
            'emission': 'emission',
            'vehicle_id': 'vehicle_id',
            'start': 'start',
            'end': 'end',
            'hidden': 'hidden',
            'locked': 'locked',
            'out_of_date': 'out_of_date',
            'stops': 'stops',
            'stop_trace': 'stop_trace',
            'stop_out_of_drive_time': 'stop_out_of_drive_time',
            'stop_distance': 'stop_distance',
            'ref': 'ref',
            'color': 'color'
        }

        self.id = None  # int
        self.distance = None  # float
        self.emission = None  # float
        self.vehicle_id = None  # int
        self.start = None  # datetime
        self.end = None  # datetime
        self.hidden = None  # bool
        self.locked = None  # bool
        self.out_of_date = None  # bool
        self.stops = None  # list[V01Stop]
        self.stop_trace = None  # str
        self.stop_out_of_drive_time = None  # bool
        self.stop_distance = None  # float
        self.ref = None  # str
        self.color = None  # str

    def __repr__(self):
        """Return '<V01Route attr=value ...>' for debugging.

        Fixes two generator bugs: the original filtered on the camelCase
        names 'swaggerTypes'/'attributeMap', which never matched the real
        attribute names (so the bulky metadata dicts leaked into every
        repr), and it interpolated the *module* name (__name__) instead
        of the class name.
        """
        properties = ['{prop}={val!r}'.format(prop=p, val=v)
                      for p, v in self.__dict__.items()
                      if p not in ('swagger_types', 'attribute_map')]
        return '<{name} {props}>'.format(name=type(self).__name__,
                                         props=' '.join(properties))
| gpl-2.0 |
hazrpg/calibre | src/calibre/gui2/store/stores/baen_webscription_plugin.py | 15 | 3332 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 1 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
import urllib2
from contextlib import closing
from lxml import html
from PyQt5.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class BaenWebScriptionStore(BasicStoreConfig, StorePlugin):
    """calibre store plugin for the Baen Ebooks (WebScription) store.

    NOTE(review): this is Python-2-era code (urllib2; ``timeout/4`` below
    relies on implicit integer division when ``timeout`` is an int).
    """

    def open(self, parent=None, detail_item=None, external=False):
        """Open the store, either in the system browser or in calibre's
        embedded web-store dialog.

        detail_item -- relative URL of a specific book page, if any
        external    -- force opening in the system browser
        """
        url = 'http://www.baenebooks.com/'

        if external or self.config.get('open_external', False):
            if detail_item:
                url = url + detail_item
            open_url(QUrl(url_slash_cleaner(url)))
        else:
            detail_url = None
            if detail_item:
                detail_url = url + detail_item
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        """Scrape the store's advanced-search page and yield up to
        max_results SearchResult objects.

        Each matching row triggers a second fetch of the product page for
        author, price and cover; that request gets a quarter of *timeout*.
        """
        url = 'http://www.baenebooks.com/searchadv.aspx?IsSubmit=true&SearchTerm=' + urllib2.quote(query)

        br = browser()

        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            # Result rows live in a deeply nested table layout.
            for data in doc.xpath('//table//table//table//table//tr'):
                if counter <= 0:
                    break

                # NOTE(review): 'id' shadows the builtin; kept as-is here.
                # Product links look like 'p-<number>-<slug>.aspx'.
                id = ''.join(data.xpath('./td[1]/a/@href'))
                if not id or not id.startswith('p-'):
                    continue

                title = ''.join(data.xpath('./td[1]/a/text()'))

                author = ''
                cover_url = ''
                price = ''

                # Fetch the product detail page for the remaining metadata.
                with closing(br.open('http://www.baenebooks.com/' + id.strip(), timeout=timeout/4)) as nf:
                    idata = html.fromstring(nf.read())
                    author = ''.join(idata.xpath('//span[@class="ProductNameText"]/../b/text()'))
                    # Drop everything up to the last 'by ' in the credit line.
                    author = author.split('by ')[-1]
                    price = ''.join(idata.xpath('//span[@class="variantprice"]/text()'))
                    # Keep only the '$amount' tail of the price text.
                    a, b, price = price.partition('$')
                    price = b + price

                    # The cover <img> id embeds the product number.
                    pnum = ''
                    mo = re.search(r'p-(?P<num>\d+)-', id.strip())
                    if mo:
                        pnum = mo.group('num')
                    if pnum:
                        cover_url = 'http://www.baenebooks.com/' + ''.join(idata.xpath('//img[@id="ProductPic%s"]/@src' % pnum))

                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = price
                s.detail_item = id.strip()
                s.drm = SearchResult.DRM_UNLOCKED
                s.formats = 'RB, MOBI, EPUB, LIT, LRF, RTF, HTML'

                yield s
| gpl-3.0 |
akheron/stango | stango/files.py | 1 | 3894 | from functools import reduce
import collections
import collections.abc
import os
import tarfile

from stango.views import file_from_tar, static_file
FilespecBase = collections.namedtuple('Filespec', 'path view kwargs')

class Filespec(FilespecBase):
    """An immutable (path, view, kwargs) triple describing one generated file.

    path   -- relative URL path; '' or a trailing '/' denotes a directory
    view   -- callable that renders the file's contents
    kwargs -- extra keyword arguments passed to the view
    """

    def __new__(cls, path, view, kwargs={}):
        if not isinstance(path, str):
            raise TypeError('path must be a str, not %r' % path)
        if path.startswith('/'):
            raise ValueError('%r: path must not start with /' % path)
        # Use the callable() builtin: isinstance(view, collections.Callable)
        # broke on Python 3.10+, where the ABC aliases were removed from the
        # collections top-level namespace.
        if not callable(view):
            raise TypeError('%r: view must be callable' % path)
        if not isinstance(kwargs, dict):
            raise TypeError('%r: kwargs must be a dict' % path)
        return super(Filespec, cls).__new__(cls, path, view, kwargs)

    def isdir(self):
        """Return True if this spec denotes a directory path."""
        return not self.path or self.path.endswith('/')

    def realpath(self, index_file):
        """Return the concrete file path; directories get index_file appended.

        Raises ValueError for a directory path when index_file is empty.
        """
        if not self.isdir():
            return self.path
        elif not index_file:
            raise ValueError('Directory path and no index_file: %r' %
                             self.path)
        return os.path.join(self.path, index_file)
class Files(collections.abc.MutableSequence):
    """A list-like collection of Filespec items.

    Constructor arguments may be Filespec instances, (path, view[, kwargs])
    tuples, or iterables of either; everything is normalized to Filespec.

    Derives from collections.abc.MutableSequence: the bare
    collections.MutableSequence alias (used by the original code) was
    deprecated since Python 3.3 and removed in Python 3.10.
    """

    def __init__(self, *args):
        self._data = []
        for arg in args:
            # Tuples are themselves iterable, so test for them first: a
            # (path, view) tuple is a single spec, not a sequence of specs.
            if isinstance(arg, tuple):
                self.append(arg)
            elif isinstance(arg, collections.abc.Iterable):
                for item in arg:
                    self.append(item)
            else:
                self.append(arg)

    def _verify(self, arg):
        """Normalize arg to a Filespec, raising TypeError if impossible."""
        if isinstance(arg, Filespec):
            return arg
        elif isinstance(arg, tuple):
            if len(arg) < 2 or len(arg) > 3:
                raise TypeError('expected a tuple of the form (path, view[, kwargs])')
            if len(arg) == 2:
                path, view = arg
                kwargs = {}
            else:
                path, view, kwargs = arg
            return Filespec(path, view, kwargs)
        else:
            raise TypeError('expected a Filespec object or tuple, got %r' % arg)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, index):
        return self._data[index]

    def __setitem__(self, index, value):
        self._data[index] = self._verify(value)

    def __delitem__(self, index):
        del self._data[index]

    def insert(self, index, value):
        self._data.insert(index, self._verify(value))

    def __eq__(self, other):
        """Element-wise equality; sequences of different lengths differ."""
        if len(self) != len(other):
            return False
        return all(a == b for a, b in zip(self, other))

    def add_prefix(self, prefix):
        """Return a new Files with *prefix* prepended to every path."""
        return Files((prefix + f.path, f.view, f.kwargs) for f in self)
def _served_path(basepath, filename, strip):
if strip > 0:
parts = filename.split('/')[strip:]
if not parts:
return ''
served_name = os.path.join(*parts)
else:
served_name = filename
return os.path.join(basepath, served_name)
def files_from_tar(basepath, tarname, strip=0):
    """Build a Files list serving every regular file inside a tar archive.

    basepath -- prefix prepended to each served path
    tarname  -- path of the tar archive to read
    strip    -- number of leading path components dropped from member names

    Members whose names vanish entirely after stripping are skipped.
    The tar handle is deliberately left open: the file_from_tar views
    read their member contents from it lazily at render time.
    """
    tar = tarfile.open(tarname, 'r')
    result = Files()
    for member in tar.getmembers():
        # Skip directories, symlinks and other special members.
        if not member.isfile():
            continue

        filename = _served_path(basepath, member.name, strip)
        if filename:
            result.append((
                filename,
                file_from_tar,
                {'tar': tar, 'member': member.name}
            ))

    return result
def files_from_dir(basepath, dir_, strip=0):
    """Build a Files list serving every file under a directory tree.

    basepath -- prefix prepended to each served path
    dir_     -- directory to walk recursively
    strip    -- number of leading path components dropped from file paths

    Each entry uses the static_file view with the on-disk path as kwarg.
    """
    result = Files()
    for dirpath, dirnames, filenames in os.walk(dir_):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
            result.append((
                _served_path(basepath, path, strip),
                static_file,
                {'path': os.path.join(dirpath, filename)}
            ))

    return result
| mit |
numenta/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py | 69 | 17728 | """
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a position relative to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.

The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.

The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debuging use
def bbox_artist(*args, **kwargs):
    """Debugging helper: forward to matplotlib.patches.bbox_artist only
    when the module-level DEBUG flag is set; otherwise do nothing."""
    if DEBUG:
        mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But same function will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Geiven a list of (width, xdescent) of each boxes, calculate the
total width and the x-offset positions of each items according to
*mode*. xdescent is analagous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
    """
    Given a list of (height, ydescent) of each box, align the boxes
    with *align* and calculate the y-offsets of each box.

    *hd_list* : list of (height, ydescent) of boxes to be aligned.
    *height* : intended total height. If None, the maximum height
               in *hd_list* is used.
    *align* : align mode. 'baseline', 'left', 'top', 'right', 'bottom',
              or 'center'. ('left'/'top' and 'right'/'bottom' are paired
              because this helper serves both packing directions.)

    Returns (height, descent, offsets).
    """
    if height is None:
        height = max([h for h, d in hd_list])

    if align == "baseline":
        # Total height is driven by the largest ascent plus largest descent.
        height_descent = max([h-d for h, d in hd_list])
        descent = max([d for h, d in hd_list])
        height = height_descent + descent
        offsets = [0. for h, d in hd_list]
    elif align in ["left","top"]:
        descent=0.
        offsets = [d for h, d in hd_list]
    elif align in ["right","bottom"]:
        descent=0.
        offsets = [height-h+d for h, d in hd_list]
    elif align == "center":
        descent=0.
        offsets = [(height-h)*.5+d for h, d in hd_list]
    else:
        raise ValueError("Unknown Align mode : %s" % (align,))

    return height, descent, offsets
class OffsetBox(martist.Artist):
    """
    The OffsetBox is a simple container artist whose children are drawn
    at a position relative to the box itself.

    Children live in *self._children*; the box's own position is
    *self._offset*, either an (x, y) pair or a callable (see set_offset).
    """
    def __init__(self, *args, **kwargs):
        super(OffsetBox, self).__init__(*args, **kwargs)

        self._children = []
        self._offset = (0, 0)

    def set_figure(self, fig):
        """
        Set the figure of this box and, recursively, of all its children.

        accepts a class:`~matplotlib.figure.Figure` instance
        """
        martist.Artist.set_figure(self, fig)
        for c in self.get_children():
            c.set_figure(fig)

    def set_offset(self, xy):
        """
        Set the offset.

        accepts an (x, y) tuple, or a callable invoked as
        callable(width, height, xdescent, ydescent) returning (x, y)
        (see get_offset).
        """
        self._offset = xy

    def get_offset(self, width, height, xdescent, ydescent):
        """
        Get the offset, given the extent of the box.

        A callable offset is evaluated with the extent; a plain (x, y)
        pair is returned unchanged.
        """
        if callable(self._offset):
            return self._offset(width, height, xdescent, ydescent)
        else:
            return self._offset

    def set_width(self, width):
        """
        Set the width of the box.

        accepts float
        """
        self.width = width

    def set_height(self, height):
        """
        Set the height of the box.

        accepts float
        """
        self.height = height

    def get_children(self):
        """
        Return a list of the artists this box contains.
        """
        return self._children

    def get_extent_offsets(self, renderer):
        # Abstract: subclasses must return
        # (width, height, xdescent, ydescent, list of child (x, y) offsets).
        raise Exception("")

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the box;
        the child offsets are computed but discarded.
        """
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        return w, h, xd, yd

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
        px, py = self.get_offset(w, h, xd, yd)
        return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)

    def draw(self, renderer):
        """
        Update the location of children if necessary and draw them
        to the given *renderer*.
        """
        width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)

        px, py = self.get_offset(width, height, xdescent, ydescent)

        # Position each child relative to this box's own offset, then draw.
        for c, (ox, oy) in zip(self.get_children(), offsets):
            c.set_offset((px+ox, py+oy))
            c.draw(renderer)

        # Draws the box outline only when the module DEBUG flag is set.
        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
    """Common state holder for the HPacker/VPacker container boxes."""

    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align=None, mode=None,
                 children=None):
        """
        *pad* : boundary pad around the packed children
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
           calculated if None.
        *align* : alignment of boxes ('baseline', 'left', 'top',
           'right', 'bottom', or 'center')
        *mode* : packing mode ('fixed', 'expand', or 'equal')
        *children* : list of child OffsetBox instances to pack
        """
        super(PackerBase, self).__init__()

        self.height = height
        self.width = width
        self.sep = sep
        self.pad = pad
        self.mode = mode
        self.align = align

        self._children = children
class VPacker(PackerBase):
    """
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of the children at drawing time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        *pad* : boundary pad
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
           calculated if None.
        *align* : alignment of boxes
        *mode* : packing mode
        """
        super(VPacker, self).__init__(pad, sep, width, height,
                                      align, mode,
                                      children)

    def get_extent_offsets(self, renderer):
        """
        update offset of childrens and return the extents of the box
        """
        # Extent of each child; re-express the descent as measured from
        # the top of the child box (h - yd), which suits vertical packing.
        whd_list = [c.get_extent(renderer) for c in self.get_children()]
        whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]

        # Horizontal direction: align the children according to self.align.
        wd_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
                                                         self.width,
                                                         self.align)

        # Vertical direction: pack the children one after the other.
        pack_list = [(h, yd) for w,h,xd,yd in whd_list]
        height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
                                                self.sep, self.mode)

        # NOTE(review): yoffsets_ is presumably a numpy array, so "+" is
        # element-wise addition with the per-child descents -- confirm
        # against _get_packed_offsets.
        yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
        # Flip from top-down accumulation to matplotlib's bottom-up
        # y-axis, anchored at the first child's baseline.
        ydescent = height - yoffsets[0]
        yoffsets = height - yoffsets

        #w, h, xd, h_yd = whd_list[-1]
        yoffsets = yoffsets - ydescent

        return width + 2*self.pad, height + 2*self.pad, \
               xdescent+self.pad, ydescent+self.pad, \
               zip(xoffsets, yoffsets)
class HPacker(PackerBase):
    """
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of the children at drawing time.
    """
    def __init__(self, pad=None, sep=None, width=None, height=None,
                 align="baseline", mode="fixed",
                 children=None):
        """
        *pad* : boundary pad
        *sep* : spacing between items
        *width*, *height* : width and height of the container box.
           calculated if None.
        *align* : alignment of boxes
        *mode* : packing mode
        """
        super(HPacker, self).__init__(pad, sep, width, height,
                                      align, mode, children)

    def get_extent_offsets(self, renderer):
        """
        update offset of childrens and return the extents of the box
        """
        whd_list = [c.get_extent(renderer) for c in self.get_children()]

        # Vertical direction: align heights/descents of all children.
        # (An earlier version pre-computed *height* here as well, but the
        # value was unconditionally overwritten just below -- and its
        # fallback branch read the non-existent attribute ``self._pad``
        # (the attribute is ``self.pad``), raising AttributeError
        # whenever self.height was given.  The dead code was removed.)
        hd_list = [(h, yd) for w, h, xd, yd in whd_list]
        height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
                                                          self.height,
                                                          self.align)

        # Horizontal direction: pack the children left to right.
        pack_list = [(w, xd) for w, h, xd, yd in whd_list]
        width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
                                               self.sep, self.mode)

        # NOTE(review): xoffsets_ is presumably a numpy array, so "+" is
        # element-wise addition with the per-child x-descents.
        xoffsets = xoffsets_ + [xd for w, h, xd, yd in whd_list]

        # Anchor on the first child's x-descent.
        xdescent = whd_list[0][2]
        xoffsets = xoffsets - xdescent

        return width + 2*self.pad, height + 2*self.pad, \
               xdescent + self.pad, ydescent + self.pad, \
               zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
    """
    The DrawingArea can contain any Artist as a child. The DrawingArea
    has a fixed width and height. The position of children relative to
    the parent is fixed.
    """

    def __init__(self, width, height, xdescent=0.,
                 ydescent=0., clip=True):
        """
        *width*, *height* : width and height of the container box.
        *xdescent*, *ydescent* : descent of the box in x- and y-direction.

        NOTE(review): *clip* is accepted but not stored or used here.
        """
        super(DrawingArea, self).__init__()

        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent

        # Children are positioned through this translation, which
        # set_offset() keeps up to date.
        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)

    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children.
        """
        return self.offset_transform

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass

    def set_offset(self, xy):
        """
        Set the offset of the container.

        Accept : tuple of x, y coordinates in display units.
        """
        self._offset = xy
        # Rebuild the translation from scratch rather than composing.
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])

    def get_offset(self):
        """
        Return the offset of the container.
        """
        return self._offset

    def get_window_extent(self, renderer):
        """
        Get the bounding box in display space.
        """
        width, height, xdescent, ydescent = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xdescent, oy - ydescent,
                                            width, height)

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the box.
        """
        return self.width, self.height, self.xdescent, self.ydescent

    def add_artist(self, a):
        'Add any :class:`~matplotlib.artist.Artist` to the container box'
        self._children.append(a)
        a.set_transform(self.get_transform())

    def draw(self, renderer):
        """
        Draw the children.
        """
        for child in self._children:
            child.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
    """
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance is the width and height of its child
    text.
    """
    def __init__(self, s,
                 textprops=None,
                 multilinebaseline=None,
                 minimumdescent=True,
                 ):
        """
        *s* : a string to be displayed.
        *textprops* : property dictionary for the text
        *multilinebaseline* : If True, baseline for multiline text is
                              adjusted so that it is (approximately)
                              center-aligned with singleline text.
        *minimumdescent* : If True, the box has a minimum descent of "p".
        """
        if textprops is None:
            textprops = {}

        # dict.has_key() is deprecated (and removed in Python 3); use
        # the "in" operator instead.
        if "va" not in textprops:
            textprops["va"] = "baseline"

        self._text = mtext.Text(0, 0, s, **textprops)

        OffsetBox.__init__(self)

        self._children = [self._text]

        self.offset_transform = mtransforms.Affine2D()
        self.offset_transform.clear()
        self.offset_transform.translate(0, 0)
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)

        self._multilinebaseline = multilinebaseline
        self._minimumdescent = minimumdescent

    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.

        If True, baseline for multiline text is
        adjusted so that it is (approximately) center-aligned with
        singleline text.
        """
        self._multilinebaseline = t

    def get_multilinebaseline(self):
        """
        get multilinebaseline.
        """
        return self._multilinebaseline

    def set_minimumdescent(self, t):
        """
        Set minimumdescent.

        If True, extent of the single line text is adjusted so that
        it has minimum descent of "p"
        """
        self._minimumdescent = t

    def get_minimumdescent(self):
        """
        get minimumdescent.
        """
        return self._minimumdescent

    def set_transform(self, t):
        """
        set_transform is ignored.
        """
        pass

    def set_offset(self, xy):
        """
        set offset of the container.

        Accept : tuple of x, y coordinates in display units.
        """
        self._offset = xy

        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])

    def get_offset(self):
        """
        return offset of the container.
        """
        return self._offset

    def get_window_extent(self, renderer):
        '''
        get the bounding box in display space.
        '''
        w, h, xd, yd = self.get_extent(renderer)
        ox, oy = self.get_offset()
        return mtransforms.Bbox.from_bounds(ox - xd, oy - yd, w, h)

    def get_extent(self, renderer):
        """
        Return width, height, xdescent, ydescent of the rendered text,
        updating the internal baseline transform as a side effect.
        """
        clean_line, ismath = self._text.is_math_text(self._text._text)

        # "lp" serves as a reference: its height/descent give the
        # single-line metrics of the current font.
        _, h_, d_ = renderer.get_text_width_height_descent(
            "lp", self._text._fontproperties, ismath=False)

        bbox, info = self._text._get_layout(renderer)
        w, h = bbox.width, bbox.height

        _, hh, dd = renderer.get_text_width_height_descent(
            clean_line, self._text._fontproperties, ismath=ismath)

        self._baseline_transform.clear()

        if len(info) > 1 and self._multilinebaseline:  # multi line
            d = h - (hh - dd)  # the baseline of the first line
            # Shift so the box is (approximately) center-aligned with a
            # single line of text.
            d_new = 0.5 * h - 0.5 * (h_ - d_)

            self._baseline_transform.translate(0, d - d_new)
            d = d_new

        else:  # single line
            h_d = max(h_ - d_, h - dd)

            if self.get_minimumdescent():
                ## to have a minimum descent, #i.e., "l" and "p" have same
                ## descents.
                d = max(dd, d_)
            else:
                d = dd

            h = h_d + d

        return w, h, 0., d

    def draw(self, renderer):
        """
        Draw the children
        """
        self._text.draw(renderer)

        bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
| agpl-3.0 |
mantidproject/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/SANSDarkRunBackgroundCorrectionTest.py | 3 | 33677 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.kernel import *
from mantid.api import *
from testhelpers import run_algorithm
import numpy as np
from SANSDarkRunBackgroundCorrection import DarkRunMonitorAndDetectorRemover
from SANSDarkRunBackgroundCorrection import SANSDarkRunBackgroundCorrection
class SANSDarkRunBackgroundCorrectionTest(unittest.TestCase):
    """
    Tests for the SANSDarkRunBackgroundCorrection algorithm, which
    subtracts a (scaled) dark-run background from a scatter workspace,
    optionally restricted to detectors, monitors or a subset of monitors.
    """
    #-----
    # Workspace2D tests
    def test_dark_run_correction_with_uniform_and_not_mean_for_workspace2D_input(self):
        # Arrange
        spectra = 4

        bin_boundaries_scatter = 5
        y_value_scatter_run = 4.
        e_value_scatter_run = 1.
        name_scatter = "_scatter_SANS_test"
        self._provide_workspace2D(bin_boundaries_scatter, y_value_scatter_run,
                                  e_value_scatter_run, name_scatter, spectra)

        bin_boundaries_dark_run = 20
        y_value_dark_run = 0.3
        e_value_dark_run = 0.
        name_dark_run = "_dark_run_SANS_test"
        self._provide_workspace2D(bin_boundaries_dark_run, y_value_dark_run,
                                  e_value_dark_run, name_dark_run, spectra)

        # Algorithm configuration
        mean = False
        uniform = True
        normalization_ratio = 0.5

        # Act (the algorithm handle returned by run_algorithm is unused)
        out_ws_name = "out_test"
        run_algorithm(
            'SANSDarkRunBackgroundCorrection',
            InputWorkspace=name_scatter,
            DarkRun=name_dark_run,
            Mean=mean,
            Uniform=uniform,
            NormalizationRatio=normalization_ratio,
            OutputWorkspace=out_ws_name,
            ApplyToDetectors=True,
            ApplyToMonitors=False,
            SelectedMonitors=[],
            rethrow=True)

        # Assert
        # Sum all bins of the dark run (all y values, hence
        # bin_boundaries_dark_run - 1), multiply by the normalization
        # ratio, then divide by the number of bins in the scatterer.
        expected_integration = y_value_dark_run * float(bin_boundaries_dark_run - 1)
        expected_correction_value = (normalization_ratio *
                                     expected_integration /
                                     float(bin_boundaries_scatter - 1))

        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
        self._check_output_workspace(mtd[name_scatter],
                                     mtd[out_ws_name],
                                     expected_correction_value)

        # Clean up
        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
        self._clean_up(ws_to_clean)

    def test_dark_run_correction_with_uniform_and_mean_for_workspace2D_input(self):
        # Arrange
        spectra = 4

        bin_boundaries_scatter = 5
        y_value_scatter_run = 4.
        e_value_scatter_run = 1.
        name_scatter = "_scatter_SANS_test"
        self._provide_workspace2D(bin_boundaries_scatter, y_value_scatter_run,
                                  e_value_scatter_run, name_scatter, spectra)

        bin_boundaries_dark_run = 20
        y_value_spectra_even_dark_run = [0.3 for element in range(bin_boundaries_dark_run - 1)]
        y_value_spectra_odd_dark_run = [0.2 for element in range(bin_boundaries_dark_run - 1)]
        y_value_dark_run = (y_value_spectra_even_dark_run + y_value_spectra_odd_dark_run +
                            y_value_spectra_even_dark_run + y_value_spectra_odd_dark_run)
        e_value_dark_run = 0
        name_dark_run = "_dark_run_SANS_test"
        self._provide_workspace2D(bin_boundaries_dark_run, y_value_dark_run,
                                  e_value_dark_run, name_dark_run, spectra, True)

        # Algorithm configuration
        mean = True
        uniform = True
        normalization_ratio = 0.5

        # Act
        out_ws_name = "out_test"
        run_algorithm(
            'SANSDarkRunBackgroundCorrection',
            InputWorkspace=name_scatter,
            DarkRun=name_dark_run,
            Mean=mean,
            Uniform=uniform,
            NormalizationRatio=normalization_ratio,
            OutputWorkspace=out_ws_name,
            ApplyToDetectors=True,
            ApplyToMonitors=False,
            SelectedMonitors=[],
            rethrow=True)

        # Assert
        # With Mean=True the per-spectrum integrals are averaged, scaled
        # by the normalization ratio and spread over the scatter bins.
        expected_integration = sum(y_value_dark_run) / float(mtd[name_dark_run].getNumberHistograms())
        expected_correction_value = (normalization_ratio *
                                     expected_integration /
                                     float(bin_boundaries_scatter - 1))

        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
        self._check_output_workspace(mtd[name_scatter],
                                     mtd[out_ws_name],
                                     expected_correction_value)

        # Clean up
        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
        self._clean_up(ws_to_clean)

    def test_dark_run_correction_with_non_uniform_and_not_mean_for_workspace2D_input(self):
        # Arrange
        spectra = 4

        bin_boundaries = 5
        y_value_scatter_run = spectra * [element for element in range(bin_boundaries - 1)]
        e_value_scatter_run = 1.
        name_scatter = "_scatter_SANS_test"
        self._provide_workspace2D(bin_boundaries, y_value_scatter_run,
                                  e_value_scatter_run, name_scatter, spectra, True)

        y_value_dark_run = spectra * [element * 0.2 for element in range(bin_boundaries - 1)]
        e_value_dark_run = 0
        name_dark_run = "_dark_run_SANS_test"
        self._provide_workspace2D(bin_boundaries, y_value_dark_run,
                                  e_value_dark_run, name_dark_run, spectra, True)

        # Algorithm configuration
        mean = False
        uniform = False
        normalization_ratio = 0.6

        # Act
        out_ws_name = "out_test"
        run_algorithm(
            'SANSDarkRunBackgroundCorrection',
            InputWorkspace=name_scatter,
            DarkRun=name_dark_run,
            Mean=mean,
            Uniform=uniform,
            NormalizationRatio=normalization_ratio,
            OutputWorkspace=out_ws_name,
            ApplyToDetectors=True,
            ApplyToMonitors=False,
            SelectedMonitors=[],
            rethrow=True)

        # Assert
        # For a non-uniform correction each dark-run bin is subtracted
        # individually, scaled only by the normalization ratio.
        expected_correction_value = normalization_ratio
        self.assertTrue(AnalysisDataService.doesExist(out_ws_name))
        self._check_output_workspace_non_uniform(mtd[name_scatter],
                                                 mtd[out_ws_name],
                                                 mtd[name_dark_run],
                                                 expected_correction_value)

        # Clean up
        ws_to_clean = [out_ws_name, name_dark_run, name_scatter]
        self._clean_up(ws_to_clean)

    def test_that_only_monitors_are_corrected_if_only_monitors_should_be_corrected(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = True
        applyToDetectors = False
        out_ws_name = "out_test"
        selected_monitor = []

        # Act
        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform,
                                           normalization_ratio, out_ws_name,
                                           applyToMonitors, applyToDetectors,
                                           selected_monitor)

        # Assert
        # assertEqual: histogram counts are integers; the previous
        # assertAlmostEquals(..., 5) used a deprecated alias and a
        # meaningless places argument.
        self.assertEqual(ws.getNumberHistograms(), scatter_ws.getNumberHistograms())

        # Expected value for monitors: corrected
        expected_monitor_Y = (monY_scatter - monY_dark * len(dark_run.dataY(0)) /
                              len(scatter_ws.dataY(0)) * normalization_ratio)
        self._assert_all_values_almost_equal(ws.dataY(0), expected_monitor_Y)
        self._assert_all_values_almost_equal(ws.dataY(1), expected_monitor_Y)

        # Expected value for detectors: unchanged
        expected_detector_Y = dataY_scatter
        for index in range(2, ws.getNumberHistograms()):
            self._assert_all_values_almost_equal(ws.dataY(index), expected_detector_Y)

    def test_that_individual_monitor_is_corrected_if_only_individual_monitor_is_chosen(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = True
        applyToDetectors = False
        out_ws_name = "out_test"
        selected_monitor = [2]

        # Act
        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform,
                                           normalization_ratio, out_ws_name,
                                           applyToMonitors, applyToDetectors,
                                           selected_monitor)

        # Assert
        self.assertEqual(ws.getNumberHistograms(), scatter_ws.getNumberHistograms())

        # Expected value for monitor 2 -- workspace index 1: corrected
        expected_monitor_Y_1 = (monY_scatter - monY_dark * len(dark_run.dataY(0)) /
                                len(scatter_ws.dataY(0)) * normalization_ratio)
        self._assert_all_values_almost_equal(ws.dataY(1), expected_monitor_Y_1)

        # Expected value for monitor 1 -- workspace index 0: unchanged
        expected_monitor_Y_0 = monY_scatter
        self._assert_all_values_almost_equal(ws.dataY(0), expected_monitor_Y_0)

        # Expected value for detectors: unchanged
        expected_detector_Y = dataY_scatter
        for index in range(2, ws.getNumberHistograms()):
            self._assert_all_values_almost_equal(ws.dataY(index), expected_detector_Y)

    def test_that_selecting_monitors_and_detectors_is_allowed(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = True
        applyToDetectors = True
        out_ws_name = "out_test"
        selected_monitor = []

        # Act
        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform,
                                           normalization_ratio, out_ws_name,
                                           applyToMonitors, applyToDetectors,
                                           selected_monitor)

        # Assert
        self.assertEqual(ws.getNumberHistograms(), scatter_ws.getNumberHistograms())

        # Expected value for monitors: corrected
        expected_monitor_Y = (monY_scatter - monY_dark * len(dark_run.dataY(0)) /
                              len(scatter_ws.dataY(0)) * normalization_ratio)
        self._assert_all_values_almost_equal(ws.dataY(1), expected_monitor_Y)
        self._assert_all_values_almost_equal(ws.dataY(0), expected_monitor_Y)

        # Expected value for detectors: corrected
        expected_detector_Y = (dataY_scatter - dataY_dark * len(dark_run.dataY(0)) /
                               len(scatter_ws.dataY(0)) * normalization_ratio)
        for index in range(2, ws.getNumberHistograms()):
            self._assert_all_values_almost_equal(ws.dataY(index), expected_detector_Y)

    def test_that_selecting_invidual_monitors_and_detectors_is_allowed(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = True
        applyToDetectors = True
        out_ws_name = "out_test"
        selected_monitor = [2]

        # Act
        ws = self._do_run_dark_subtraction(scatter_ws, dark_run, mean, uniform,
                                           normalization_ratio, out_ws_name,
                                           applyToMonitors, applyToDetectors,
                                           selected_monitor)

        # Assert
        self.assertEqual(ws.getNumberHistograms(), scatter_ws.getNumberHistograms())

        # Expected value for monitor 2 -- workspace index 1: corrected
        expected_monitor_Y_1 = (monY_scatter - monY_dark * len(dark_run.dataY(0)) /
                                len(scatter_ws.dataY(0)) * normalization_ratio)
        self._assert_all_values_almost_equal(ws.dataY(1), expected_monitor_Y_1)

        # Expected value for monitor 1 -- workspace index 0: unchanged
        expected_monitor_Y_0 = monY_scatter
        self._assert_all_values_almost_equal(ws.dataY(0), expected_monitor_Y_0)

        # Expected value for detectors: corrected
        expected_detector_Y = (dataY_scatter - dataY_dark * len(dark_run.dataY(0)) /
                               len(scatter_ws.dataY(0)) * normalization_ratio)
        for index in range(2, ws.getNumberHistograms()):
            self._assert_all_values_almost_equal(ws.dataY(index), expected_detector_Y)

    def test_that_throws_if_monitor_selection_is_invalid(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = True
        applyToDetectors = False
        selected_monitor = [3]  # only has det IDs 1 and 2 as monitors

        # Act + Assert
        kwds = {"InputWorkspace": scatter_ws,
                "DarkRun": dark_run,
                "NormalizationRatio": normalization_ratio,
                "Mean": mean,
                "Uniform": uniform,
                "ApplyToDetectors": applyToDetectors,
                "ApplyToMonitors": applyToMonitors,
                "SelectedMonitors": selected_monitor,
                "OutputWorkspace": "out_ws"}

        scatter_name = "scatter_workspace_test"
        dark_name = "dark_workspace_test"
        AnalysisDataService.add(scatter_name, scatter_ws)
        AnalysisDataService.add(dark_name, dark_run)

        self.assertRaises(RuntimeError, run_algorithm, 'SANSDarkRunBackgroundCorrection',
                          rethrow=True, **kwds)

        # Clean up
        ws_to_clean = [scatter_name, dark_name]
        self._clean_up(ws_to_clean)

    def test_that_throws_if_neither_monitor_nor_detectors_are_selected(self):
        # Arrange
        monY_scatter = 1.
        monE_scatter = 1.
        dataY_scatter = 2.
        dataE_scatter = 2.
        scatter_ws = self._load_workspace_with_monitors(monY_scatter, monE_scatter,
                                                        dataY_scatter, dataE_scatter,
                                                        as_dark_run=False)
        monY_dark = 3.
        monE_dark = 3.
        dataY_dark = 4.
        dataE_dark = 4.
        dark_run = self._load_workspace_with_monitors(monY_dark, monE_dark,
                                                      dataY_dark, dataE_dark,
                                                      as_dark_run=True)
        mean = False
        uniform = True
        normalization_ratio = 0.6
        applyToMonitors = False
        applyToDetectors = False
        selected_monitor = []

        # Act + Assert
        kwds = {"InputWorkspace": scatter_ws,
                "DarkRun": dark_run,
                "NormalizationRatio": normalization_ratio,
                "Mean": mean,
                "Uniform": uniform,
                "ApplyToDetectors": applyToDetectors,
                "ApplyToMonitors": applyToMonitors,
                "SelectedMonitors": selected_monitor,
                "OutputWorkspace": "out_ws"}

        scatter_name = "scatter_workspace_test"
        dark_name = "dark_workspace_test"
        AnalysisDataService.add(scatter_name, scatter_ws)
        AnalysisDataService.add(dark_name, dark_run)

        self.assertRaises(RuntimeError, run_algorithm, 'SANSDarkRunBackgroundCorrection',
                          rethrow=True, **kwds)

        # Clean up
        ws_to_clean = [scatter_name, dark_name]
        self._clean_up(ws_to_clean)

    #------
    # Helper methods
    def _assert_all_values_almost_equal(self, data, expected):
        # Every bin of *data* should (almost) equal the single *expected*
        # value.  Replaces a repeated side-effecting lambda that wrapped
        # assertAlmostEqual calls in all().
        for value in data:
            self.assertAlmostEqual(value, expected, 5, "Should be equal")

    def _create_test_workspace(self, name, x, y, error, number_of_spectra):
        # Create a Workspace2D in the ADS and return its name.
        alg = run_algorithm('CreateWorkspace',
                            DataX=x,
                            DataY=y,
                            DataE=error,
                            NSpec=number_of_spectra,
                            OutputWorkspace=name)
        return alg.getPropertyValue("OutputWorkspace")

    def _check_output_workspace(self, original_ws, corrected_ws, expected_correction_value):
        # Every corrected bin should equal the original bin minus the
        # uniform correction value.
        num_spectra = original_ws.getNumberHistograms()
        for index in range(0, num_spectra):
            y_original = original_ws.dataY(index)
            y_corrected = corrected_ws.dataY(index)
            for elem in range(0, len(y_original)):
                expected = y_original[elem] - expected_correction_value
                self.assertAlmostEqual(expected,
                                       y_corrected[elem], 4)

    def _do_run_dark_subtraction(self, scatter, dark_run, mean, uniform, normalization_ratio,
                                 out_ws_name, applyToMonitors, applyToDetectors, selected_monitor):
        # Run the algorithm unmanaged (as a child algorithm) and return
        # the output workspace object directly.
        alg_dark = AlgorithmManager.createUnmanaged("SANSDarkRunBackgroundCorrection")
        alg_dark.initialize()
        alg_dark.setChild(True)
        alg_dark.setProperty("InputWorkspace", scatter)
        alg_dark.setProperty("DarkRun", dark_run)
        alg_dark.setProperty("Mean", mean)
        alg_dark.setProperty("Uniform", uniform)
        alg_dark.setProperty("NormalizationRatio", normalization_ratio)
        alg_dark.setProperty("OutputWorkspace", out_ws_name)
        alg_dark.setProperty("ApplyToMonitors", applyToMonitors)
        alg_dark.setProperty("ApplyToDetectors", applyToDetectors)
        alg_dark.setProperty("SelectedMonitors", selected_monitor)
        alg_dark.execute()
        return alg_dark.getProperty("OutputWorkspace").value

    def _check_output_workspace_non_uniform(self, original_ws, corrected_ws,
                                            dark_ws, expected_correction_value):
        # Every corrected bin should equal the original bin minus the
        # matching (scaled) dark-run bin.
        num_spectra = original_ws.getNumberHistograms()
        for index in range(0, num_spectra):
            y_original = original_ws.dataY(index)
            y_dark = dark_ws.dataY(index)
            y_corrected = corrected_ws.dataY(index)
            for elem in range(0, len(y_original)):
                expected = y_original[elem] - y_dark[elem]*expected_correction_value
                self.assertAlmostEqual(expected, y_corrected[elem], 4)

    def _provide_workspace2D(self, bin_boundaries, y_value, e_value, name, spectra, use_y_list=False):
        # Build flat X/Y/E arrays for a Workspace2D.  *y_value* is either
        # a single value replicated per bin or (use_y_list=True) a full
        # list of bin values.
        x = spectra * [element for element in range(bin_boundaries)]
        y = None
        if use_y_list:
            y = y_value
        else:
            y = spectra * [y_value for element in range(bin_boundaries - 1)]
        e = spectra * [e_value for element in range(bin_boundaries - 1)]
        self._create_test_workspace(name, x, y, e, spectra)

    def _clean_up(self, ws_to_clean):
        # Remove the listed workspaces from the ADS if they exist.
        for ws in ws_to_clean:
            if AnalysisDataService.doesExist(ws):
                AnalysisDataService.remove(ws)

    def _load_workspace_with_monitors(self, monY, monE, dataY, dataE, as_dark_run=False):
        # Load a reference LOQ workspace (spectra 0 and 1 are monitors)
        # and overwrite the monitor values with monY/monE and the
        # detector values with dataY/dataE.  The two branches of the
        # original if/else here were byte-for-byte identical, so they
        # have been merged.
        filename = "LOQ48127np.nxs"
        out_ws_name = "sans_workspace_test"
        if as_dark_run:
            out_ws_name = "dark_run_workspace_test"

        alg_load = AlgorithmManager.createUnmanaged("LoadNexusProcessed")
        alg_load.initialize()
        alg_load.setChild(True)
        alg_load.setProperty("Filename", filename)
        alg_load.setProperty("OutputWorkspace", out_ws_name)
        alg_load.execute()
        ws = alg_load.getProperty("OutputWorkspace").value

        # Monitors are at workspace indices 0 and 1.
        ws.setY(0, ws.dataY(0)*0.0 + monY)
        ws.setE(0, ws.dataE(0)*0.0 + monE)
        ws.setY(1, ws.dataY(1)*0.0 + monY)
        ws.setE(1, ws.dataE(1)*0.0 + monE)
        # All remaining spectra are detectors.
        for element in range(2, ws.getNumberHistograms()):
            ws.setY(element, ws.dataY(element)*0.0 + dataY)
            ws.setE(element, ws.dataE(element)*0.0 + dataE)
        return ws
class DarkRunMonitorAndDetectorRemoverTest(unittest.TestCase):
def test_finds_all_monitor_indices_when_monitor_is_present(self):
# Arrange
test_ws = self._load_workspace_with_monitors()
ws = mtd[test_ws]
remover = DarkRunMonitorAndDetectorRemover()
# Act
indices = remover.find_monitor_workspace_indices(ws)
# Assert
ws_index, det_ids = zip(*indices)
self.assertEqual(len(indices), 2, "There should be two monitors")
self.assertEqual(ws_index[0], 0, "The first monitor should have a workspace index of 0")
self.assertEqual(ws_index[1], 1, "The second monitor should have a workspace index of 1")
self.assertEqual(det_ids[0], 1, "The first monitor should have a detector ID of 1")
self.assertEqual(det_ids[1], 2, "The second monitor should have a detector ID of 2")
# Clean up
ws_to_clean =[test_ws]
self._clean_up(ws_to_clean)
def test_find_no_monitors_when_no_monitors_are_present(self):
# Arrange
test_ws = self._load_workspace_without_monitors()
ws = mtd[test_ws]
remover = DarkRunMonitorAndDetectorRemover()
# Act
indices = remover.find_monitor_workspace_indices(ws)
# Assert
self.assertEqual(len(indices), 0, "There should be no monitors")
# Clean up
ws_to_clean =[test_ws]
self._clean_up(ws_to_clean)
def test_keep_all_monitors_discard_detectors(self):
# Arrange
test_ws = self._load_workspace_with_monitors()
ws = mtd[test_ws]
remover = DarkRunMonitorAndDetectorRemover()
dataY0_reference = np.copy(ws.dataY(0))
dataE0_reference = np.copy(ws.dataE(0))
dataY1_reference = np.copy(ws.dataY(1))
dataE1_reference = np.copy(ws.dataE(1))
number_histograms_reference = ws.getNumberHistograms()
zero_reference = dataY0_reference*0
# Act
monitor_selection = []
dark_run_corrected = remover.set_pure_monitor_dark_run(ws, monitor_selection)
# Assert
self.assertEqual(dark_run_corrected.getNumberHistograms(), number_histograms_reference,
"The number of histograms should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataY(0), dataY0_reference,
"First monitor Y data should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataE(0), dataE0_reference,
"First monitor E data should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataY(1), dataY1_reference,
"Second monitor Y data should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataE(1), dataE1_reference,
"Second monitor E data should not have changed")
for element in range(2, dark_run_corrected.getNumberHistograms()):
self._assert_items_are_equal(dark_run_corrected.dataY(element), zero_reference,
"The Y data of non-monitor detectors should be 0")
self._assert_items_are_equal(dark_run_corrected.dataE(element), zero_reference,
"The E data of non-monitor detectors should be 0")
# Clean up
ws_to_clean = [test_ws]
self._clean_up(ws_to_clean)
def test_keep_all_detectors_discard_monitors(self):
# Arrange
test_ws = self._load_workspace_with_monitors()
ws = mtd[test_ws]
remover = DarkRunMonitorAndDetectorRemover()
ref_ws = ws.clone()
zero_reference = ref_ws.dataY(0)*0
# Act
monitor_selection = []
dark_run_corrected = remover.set_pure_detector_dark_run(ws)
# Assert
self.assertEqual(dark_run_corrected.getNumberHistograms(), ref_ws.getNumberHistograms(),
"The number of histograms should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataY(0), zero_reference,
"First monitor Y data should be 0")
self._assert_items_are_equal(dark_run_corrected.dataE(0), zero_reference,
"First monitor E data should be 0")
self._assert_items_are_equal(dark_run_corrected.dataY(1), zero_reference,
"Second monitor Y data should be 0")
self._assert_items_are_equal(dark_run_corrected.dataE(1), zero_reference,
"Second monitor E data should be 0")
for element in range(2, dark_run_corrected.getNumberHistograms()):
self._assert_items_are_equal(dark_run_corrected.dataY(element), ref_ws.dataY(element),
"The Y data of non-monitor detectors should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataE(element), ref_ws.dataE(element),
"The E data of non-monitor detectors should not have changed")
# Clean up
ws_to_clean = [test_ws, "ref_ws"]
self._clean_up(ws_to_clean)
def test_that_individual_monitors_can_be_selected(self):
# Arrange
test_ws = self._load_workspace_with_monitors()
ws = mtd[test_ws]
remover = DarkRunMonitorAndDetectorRemover()
zero_reference = np.copy(ws.dataY(0))*0
dataY0_reference = np.copy(ws.dataY(0))
dataE0_reference = np.copy(ws.dataE(0))
number_histograms_reference = ws.getNumberHistograms()
monitor_selection = [1] # We select the monitor with detector ID 1
# which is workspace index 0 for this workspace
# Act
dark_run_corrected = remover.set_pure_monitor_dark_run(ws, monitor_selection)
# Assert
self.assertEqual(dark_run_corrected.getNumberHistograms(), number_histograms_reference,
"The number of histograms should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataY(0), dataY0_reference,
"First monitor Y data should be 0")
self._assert_items_are_equal(dark_run_corrected.dataE(0), dataE0_reference,
"First monitor E data should be 0")
self._assert_items_are_equal(dark_run_corrected.dataY(1), zero_reference,
"Second monitor Y data should not have changed")
self._assert_items_are_equal(dark_run_corrected.dataE(1), zero_reference,
"Second monitor E data should not have changed")
for element in range(2, dark_run_corrected.getNumberHistograms()):
self._assert_items_are_equal(dark_run_corrected.dataY(element), zero_reference,
"The Y data of non-monitor detectors should be 0")
self._assert_items_are_equal(dark_run_corrected.dataE(element), zero_reference,
"The E data of non-monitor detectors should be 0")
# Clean up
ws_to_clean = [test_ws]
self._clean_up(ws_to_clean)
def test_that_throws_if_selection_does_not_match_available_monitor_list(self):
    """set_pure_monitor_dark_run must raise RuntimeError when the
    selection contains detector IDs that are not monitors on the
    workspace.

    Fix: removed four unused locals and the meaningless assignment of
    assertRaises' return value.
    """
    # Arrange
    test_ws = self._load_workspace_with_monitors()
    ws = mtd[test_ws]
    remover = DarkRunMonitorAndDetectorRemover()
    # Detector ID 0 is not in the workspace's monitor list.
    monitor_selection = [0, 2]

    # Act + Assert
    args = [ws, monitor_selection]
    self.assertRaises(RuntimeError, remover.set_pure_monitor_dark_run, *args)

    # Clean up
    ws_to_clean = [test_ws]
    self._clean_up(ws_to_clean)
def _load_workspace_with_monitors(self):
    """Load the LOQ48127np.nxs reference file (which contains monitor
    spectra) into the ADS and return the resulting workspace name."""
    alg = run_algorithm('LoadNexusProcessed',
                        Filename="LOQ48127np.nxs",
                        OutputWorkspace="dark_run_monitor_test_ws",
                        rethrow=True)
    return alg.getPropertyValue("OutputWorkspace")
def _load_workspace_without_monitors(self):
    """Create a sample workspace (no monitor data loaded from file) in
    the ADS and return its name."""
    alg = run_algorithm('CreateSampleWorkspace',
                        OutputWorkspace="dark_run_monitor_test_ws",
                        rethrow=True)
    return alg.getPropertyValue("OutputWorkspace")
def _clean_up(self, ws_to_clean):
    """Delete every workspace in ``ws_to_clean`` from the analysis data
    service, silently skipping names that are not registered."""
    for workspace_name in ws_to_clean:
        if AnalysisDataService.doesExist(workspace_name):
            AnalysisDataService.remove(workspace_name)
def _assert_items_are_equal(self, list1, list2, message):
    """Element-wise equality assertion.

    Needed because the RHEL6 environment cannot use assertItemsEqual.
    Fix: also assert equal lengths first — the original loop only ran
    over ``list1`` and silently passed when ``list2`` had extra items.
    """
    self.assertEqual(len(list1), len(list2), message)
    for index in range(0, len(list1)):
        self.assertEqual(list1[index], list2[index], message)
# Allow this test module to be executed directly as a script.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
gisce/openobject-server | openerp/tools/sql.py | 455 | 1173 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def drop_view_if_exists(cr, viewname):
    """Drop the SQL view named ``viewname`` (with CASCADE) if it exists,
    then commit the transaction on cursor ``cr``.

    ``viewname`` is interpolated directly into the statement, so it must
    come from trusted (internal) code, never from user input.
    """
    query = "DROP view IF EXISTS %s CASCADE" % (viewname,)
    cr.execute(query)
    cr.commit()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alex-dow/pybuilder | src/unittest/python/plugins/filter_resources_plugin_tests.py | 7 | 2087 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fluentmock import UnitTests, when, verify, NEVER
from mock import Mock
from pybuilder.core import Project
from pybuilder.plugins.filter_resources_plugin import ProjectDictWrapper
class ProjectDictWrapperTest(UnitTests):
    """Tests for ProjectDictWrapper's dict-style access to project
    attributes and properties.

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` (same behavior, current unittest API).
    """

    def test_should_return_project_property_when_property_is_defined(self):
        # A real attribute on the project is returned directly;
        # get_property must not be consulted.
        project_mock = Mock(Project)
        project_mock.name = "my name"

        self.assertEqual("my name", ProjectDictWrapper(project_mock, Mock())["name"])

        verify(project_mock, NEVER).get_property("name", "name")

    def test_should_delegate_to_project_get_property_when_attribute_is_not_defined(self):
        # No matching attribute -> fall back to the project property.
        project_mock = Project(".")
        when(project_mock).has_property("spam").then_return(True)
        when(project_mock).get_property("spam").then_return("eggs")

        self.assertEqual("eggs", ProjectDictWrapper(project_mock, Mock())["spam"])

        verify(project_mock).get_property("spam")

    def test_should_warn_when_substitution_is_skipped(self):
        # Unknown keys are left as "${key}" and a warning is logged.
        project_mock = Project(".")
        logger_mock = Mock()
        when(project_mock).has_property("n/a").then_return(False)

        self.assertEqual("${n/a}", ProjectDictWrapper(project_mock, logger_mock)["n/a"])

        verify(project_mock, NEVER).get_property("n/a")
        verify(logger_mock).warn(
            "Skipping impossible substitution for 'n/a' - there is no matching project attribute or property.")
| apache-2.0 |
ahmadio/edx-platform | common/lib/xmodule/xmodule/modulestore/django.py | 44 | 10655 | """
Module that provides a connection to the ModuleStore specified in the django settings.
Passes settings.MODULESTORE as kwargs to MongoModuleStore
"""
from __future__ import absolute_import
from importlib import import_module
import logging
import re
from django.conf import settings
# This configuration must be executed BEFORE any additional Django imports. Otherwise, the imports may fail due to
# Django not being configured properly. This mostly applies to tests.
if not settings.configured:
settings.configure()
from django.core.cache import get_cache, InvalidCacheBackendError
import django.dispatch
import django.utils
from pymongo import ReadPreference
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.draft_and_published import BranchSettingMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.util.django import get_current_request_hostname
import xblock.reference.plugins
try:
# We may not always have the request_cache module available
from request_cache.middleware import RequestCache
HAS_REQUEST_CACHE = True
except ImportError:
HAS_REQUEST_CACHE = False
# We also may not always have the current request user (crum) module available
try:
from xblock_django.user_service import DjangoXBlockUserService
from crum import get_current_user
HAS_USER_SERVICE = True
except ImportError:
HAS_USER_SERVICE = False
try:
from xblock_django.models import XBlockDisableConfig
except ImportError:
XBlockDisableConfig = None
log = logging.getLogger(__name__)
ASSET_IGNORE_REGEX = getattr(settings, "ASSET_IGNORE_REGEX", r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)")
class SignalHandler(object):
    """
    Gives the modulestores a way to emit Django signals that other parts
    of the application can subscribe to.  For example, an app that needs
    to react every time a course is published (search indexing, say) can
    listen for the event and enqueue a celery task::

        from django.dispatch import receiver
        from celery.task import task
        from xmodule.modulestore.django import modulestore, SignalHandler

        @receiver(SignalHandler.course_published)
        def listen_for_course_publish(sender, course_key, **kwargs):
            do_my_expensive_update.delay(course_key)

        @task()
        def do_my_expensive_update(course_key):
            # ...

    Notes:

      1. Delivery uses the standard Django Signals mechanism.
      2. ``sender`` is the modulestore class emitting the signal.
      3. Handler parameters *must* be named ``sender`` and ``course_key``.
      4. Always accept ``**kwargs`` — new arguments may be added later.
      5. Receivers run in-process and should do almost no work: their job
         is only to kick off the celery task that does the real work.
    """

    pre_publish = django.dispatch.Signal(providing_args=["course_key"])
    course_published = django.dispatch.Signal(providing_args=["course_key"])
    course_deleted = django.dispatch.Signal(providing_args=["course_key"])
    library_updated = django.dispatch.Signal(providing_args=["library_key"])

    # Maps the string names accepted by `send` onto the signal objects.
    _mapping = {
        "pre_publish": pre_publish,
        "course_published": course_published,
        "course_deleted": course_deleted,
        "library_updated": library_updated,
    }

    def __init__(self, modulestore_class):
        self.modulestore_class = modulestore_class

    def send(self, signal_name, **kwargs):
        """Robustly deliver the named signal and log every receiver's response."""
        signal = self._mapping[signal_name]
        responses = signal.send_robust(sender=self.modulestore_class, **kwargs)

        for receiver, response in responses:
            log.info('Sent %s signal to %s with kwargs %s. Response was: %s', signal_name, receiver, kwargs, response)
def load_function(path):
    """
    Import and return the object named by ``path``.

    ``path`` has the form "path.to.module.function": the module part is
    imported and the trailing attribute is looked up on it.
    """
    module_path, _, attr_name = path.rpartition('.')
    module = import_module(module_path)
    return getattr(module, attr_name)
def create_modulestore_instance(
        engine,
        content_store,
        doc_store_config,
        options,
        i18n_service=None,
        fs_service=None,
        user_service=None,
        signal_handler=None,
):
    """
    This will return a new instance of a modulestore given an engine and options

    ``engine`` is a dotted path to the modulestore class; ``options`` may
    name further dotted-path callables (e.g. 'render_template') which are
    resolved here.  The optional service arguments default to
    module-level implementations when not supplied.
    """
    # Resolve the modulestore class from its dotted path.
    class_ = load_function(engine)

    # Work on a copy so the caller's options dict is not mutated.
    _options = {}
    _options.update(options)

    # These option values may be given as dotted paths; resolve them to
    # actual callables before passing them to the store.
    FUNCTION_KEYS = ['render_template']
    for key in FUNCTION_KEYS:
        if key in _options and isinstance(_options[key], basestring):
            _options[key] = load_function(_options[key])

    # Request-scoped cache is only available when the request_cache
    # middleware could be imported (see module-level try/except).
    if HAS_REQUEST_CACHE:
        request_cache = RequestCache.get_request_cache()
    else:
        request_cache = None

    # Prefer the dedicated cache backend; fall back to the default one.
    try:
        metadata_inheritance_cache = get_cache('mongo_metadata_inheritance')
    except InvalidCacheBackendError:
        metadata_inheritance_cache = get_cache('default')

    # A MixedModuleStore needs this factory to build its sub-stores.
    if issubclass(class_, MixedModuleStore):
        _options['create_modulestore_instance'] = create_modulestore_instance

    if issubclass(class_, BranchSettingMixin):
        _options['branch_setting_func'] = _get_modulestore_branch_setting

    if HAS_USER_SERVICE and not user_service:
        xb_user_service = DjangoXBlockUserService(get_current_user())
    else:
        xb_user_service = None

    # Translate the read_preference name into the pymongo constant.
    if 'read_preference' in doc_store_config:
        doc_store_config['read_preference'] = getattr(ReadPreference, doc_store_config['read_preference'])

    if XBlockDisableConfig and settings.FEATURES.get('ENABLE_DISABLING_XBLOCK_TYPES', False):
        disabled_xblock_types = XBlockDisableConfig.disabled_block_types()
    else:
        disabled_xblock_types = ()

    return class_(
        contentstore=content_store,
        metadata_inheritance_cache_subsystem=metadata_inheritance_cache,
        request_cache=request_cache,
        xblock_mixins=getattr(settings, 'XBLOCK_MIXINS', ()),
        xblock_select=getattr(settings, 'XBLOCK_SELECT_FUNCTION', None),
        disabled_xblock_types=disabled_xblock_types,
        doc_store_config=doc_store_config,
        i18n_service=i18n_service or ModuleI18nService(),
        fs_service=fs_service or xblock.reference.plugins.FSService(),
        user_service=user_service or xb_user_service,
        signal_handler=signal_handler or SignalHandler(class_),
        **_options
    )
# A singleton instance of the Mixed Modulestore; created lazily by
# `modulestore()` and reset to None by `clear_existing_modulestores()`.
_MIXED_MODULESTORE = None
def modulestore():
    """
    Return the application-wide Mixed modulestore singleton, creating it
    (from ``settings.MODULESTORE['default']``) on first use.
    """
    global _MIXED_MODULESTORE  # pylint: disable=global-statement
    if _MIXED_MODULESTORE is not None:
        return _MIXED_MODULESTORE

    default_config = settings.MODULESTORE['default']
    _MIXED_MODULESTORE = create_modulestore_instance(
        default_config['ENGINE'],
        contentstore(),
        default_config.get('DOC_STORE_CONFIG', {}),
        default_config.get('OPTIONS', {})
    )
    if settings.FEATURES.get('CUSTOM_COURSES_EDX'):
        # TODO: This import prevents a circular import issue, but is
        # symptomatic of a lib having a dependency on code in lms. This
        # should be updated to have a setting that enumerates modulestore
        # wrappers and then uses that setting to wrap the modulestore in
        # appropriate wrappers depending on enabled features.
        from ccx.modulestore import CCXModulestoreWrapper  # pylint: disable=import-error
        _MIXED_MODULESTORE = CCXModulestoreWrapper(_MIXED_MODULESTORE)

    return _MIXED_MODULESTORE
def clear_existing_modulestores():
    """
    Drop the cached modulestore singleton so the next `modulestore()`
    call rebuilds it from scratch.

    Useful for flushing state between unit tests.
    """
    global _MIXED_MODULESTORE  # pylint: disable=global-statement
    _MIXED_MODULESTORE = None
class ModuleI18nService(object):
    """
    The XBlock runtime "i18n" service.

    ``django.utils.translation`` already implements the
    gettext.Translations interface (ugettext, ungettext, etc.), so this
    class mostly delegates to it, adding only a locale-aware strftime.
    """

    def __getattr__(self, name):
        # Anything not defined here (ugettext, ungettext, ...) falls
        # through to Django's translation module.
        return getattr(django.utils.translation, name)

    def strftime(self, *args, **kwargs):
        """
        A locale-aware implementation of strftime.
        """
        # Deliberately a function-level import: the xmodule test suite
        # cannot import this module (Django is unavailable there), and it
        # never calls this method, so hiding the import keeps that suite
        # passing.  Acknowledged as the wrong home for this dependency —
        # it should move when the code is refactored.
        from util.date_utils import strftime_localized
        return strftime_localized(*args, **kwargs)
def _get_modulestore_branch_setting():
    """
    Return the branch setting for the module store.

    Precedence:
      1. a hostname-regex mapping from
         ``settings.HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS`` matched
         against the current request's hostname, if configured;
      2. the ``MODULESTORE_BRANCH`` setting, if set;
      3. otherwise None.

    Fixes: removed the dead ``branch = None`` bookkeeping (the variable
    could never be non-None at the final check), the needless inner
    closure, and the Python-2-only ``iterkeys()`` call.
    """
    hostname = get_current_request_hostname()
    if hostname:
        # Hostname-regex -> branch mappings defined in configuration.
        mappings = getattr(settings, 'HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS', None)
        if mappings:
            for pattern in mappings:
                if re.match(pattern, hostname):
                    return mappings[pattern]
    # No hostname or no match: fall back to the global setting (may be None).
    return getattr(settings, 'MODULESTORE_BRANCH', None)
| agpl-3.0 |
mhubig/borg | borg/lrucache.py | 5 | 1181 | class LRUCache:
def __init__(self, capacity, dispose):
self._cache = {}
self._lru = []
self._capacity = capacity
self._dispose = dispose
def __setitem__(self, key, value):
assert key not in self._cache, (
"Unexpected attempt to replace a cached item,"
" without first deleting the old item.")
self._lru.append(key)
while len(self._lru) > self._capacity:
del self[self._lru[0]]
self._cache[key] = value
def __getitem__(self, key):
value = self._cache[key] # raise KeyError if not found
self._lru.remove(key)
self._lru.append(key)
return value
def __delitem__(self, key):
value = self._cache.pop(key) # raise KeyError if not found
self._dispose(value)
self._lru.remove(key)
def __contains__(self, key):
    # Membership test only; does NOT refresh the key's LRU position.
    return key in self._cache
def clear(self):
for value in self._cache.values():
self._dispose(value)
self._cache.clear()
# useful for testing
def items(self):
return self._cache.items()
def __len__(self):
    # Number of items currently cached (<= capacity).
    return len(self._cache)
| bsd-3-clause |
chudaol/edx-platform | lms/djangoapps/commerce/api/v1/serializers.py | 11 | 1160 | """ API v1 serializers. """
from rest_framework import serializers
from commerce.api.v1.models import Course
from course_modes.models import CourseMode
class CourseModeSerializer(serializers.ModelSerializer):
    """ CourseMode serializer.

    Renames model fields for the API: mode_slug -> name,
    min_price -> price, expiration_datetime -> expires.
    """
    name = serializers.CharField(source='mode_slug')
    price = serializers.IntegerField(source='min_price')
    # NOTE(review): `blank=True` is a Django model-field kwarg; DRF fields
    # normally use `allow_blank` — confirm this has an effect in the DRF
    # version in use.
    expires = serializers.DateTimeField(source='expiration_datetime', required=False, blank=True)

    def get_identity(self, data):
        # Identify a mode by its 'name' entry; non-dict input yields None.
        try:
            return data.get('name', None)
        except AttributeError:
            return None

    class Meta(object):  # pylint: disable=missing-docstring
        model = CourseMode
        fields = ('name', 'currency', 'price', 'sku', 'expires')
class CourseSerializer(serializers.Serializer):
    """ Course serializer.

    Serializes a commerce Course (id + its list of course modes).
    """
    id = serializers.CharField()  # pylint: disable=invalid-name
    # allow_add_remove lets a write replace the whole nested mode list
    # (DRF 2.x API).
    modes = CourseModeSerializer(many=True, allow_add_remove=True)

    def restore_object(self, attrs, instance=None):
        # DRF 2.x deserialization hook: build a new Course or update the
        # existing instance in place.
        if instance is None:
            return Course(attrs['id'], attrs['modes'])

        instance.update(attrs)
        return instance
| agpl-3.0 |
Quikling/gpdb | src/test/tinc/tincrepo/resource_management/memory_accounting/test_oom.py | 10 | 9240 | import tinctest
import unittest2 as unittest
from mpp.models import MPPTestCase
from mpp.lib.PSQL import PSQL
from gppylib.commands.base import Command
from mpp.lib.gpConfig import GpConfig
from tinctest.models.scenario import ScenarioTestCase
class OOMTestCase(MPPTestCase, ScenarioTestCase):
    """
    @product_version gpdb:[4.3.0.0-MAIN], hawq: [1.2.1.0-]
    """
    # Scenario tests that drive segments out of memory (OOM) under a
    # deliberately tiny gp_vmem_protect_limit and then verify the server
    # logs.  Scenario step strings below are tinc test-module paths and
    # must not be altered.

    @classmethod
    def tearDownClass(cls):
        # Reset GUC gp_vmem_protect_limit to default
        Command('Run gpconfig to set GUC gp_vmem_protect_limit' ,
                'source $GPHOME/greenplum_path.sh;gpconfig -c gp_vmem_protect_limit -m 8192 -v 8192; gpconfig -c gp_vmem_limit_per_query -v 0 --skipvalidation').run(validateAfter=True)
        # Restart DB
        Command('Restart database for GUCs to take effect',
                'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)

    def gp_version(self):
        """
        @todo: ScenarioTest does not have product from MPPTestCase, need to have the method in ScenarioTestCase.
        This is only a hack.
        """
        # Detect the product from the server's version() string.
        result = PSQL.run_sql_command( sql_cmd='select version()', flags='-t -q' )
        if "HAWQ" in result:
            return "hawq"
        else:
            return "gpdb"

    def setUp(self):
        # Set GUC gp_vmem_protect_limit
        # self.prd picks the product-specific verification step suffix.
        self.prd = "_hawq"
        if self.gp_version() == "gpdb":
            self.prd = ""
        gpconfig = GpConfig()
        expected_vmem = '20'
        expected_runaway_perc = '0'
        restart_db = False
        # test_07 additionally needs a per-query VM limit to trigger the
        # per-query abort path rather than the global OOM path.
        if self.name == "OOMTestCase.test_07_OOM_abort_query":
            gpconfig.setParameter('gp_vmem_limit_per_query', '2MB', '2MB', '--skipvalidation')
            restart_db = True
        (vmem, _) = gpconfig.getParameter('gp_vmem_protect_limit')
        (runaway_perc, _) = GpConfig().getParameter('runaway_detector_activation_percent')
        # Only pay the restart cost when a GUC actually changes.
        if runaway_perc == expected_runaway_perc and vmem == expected_vmem:
            tinctest.logger.info('gp_vmem_protect_limit and runaway_detector_activation_percent GUCs already set correctly')
        else:
            tinctest.logger.info('Setting GUC and restarting DB')
            gpconfig.setParameter('runaway_detector_activation_percent', expected_runaway_perc, expected_runaway_perc)
            gpconfig.setParameter('gp_vmem_protect_limit', expected_vmem, expected_vmem)
            restart_db = True
        if restart_db:
            # Restart DB
            Command('Restart database for GUCs to take effect',
                    'source $GPHOME/greenplum_path.sh && gpstop -ar').run(validateAfter=True)
        super(OOMTestCase, self).setUp()

    def test_01_OOM_with_singlequery(self):
        """
        @description Run a single query OOM and verify log
        """
        # Step 1: drive one query out of memory.
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_singlequery_oom')
        self.test_case_scenario.append(test_case_list1)

        # Step 2: verify the OOM report in the logs.
        test_case_list2 = []
        test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_singlequery_oom%s' % self.prd)
        self.test_case_scenario.append(test_case_list2)

    def test_02_OOM_concurrent_sleeps(self):
        """
        @description Run a single query OOM while multiple other queries are sleeping and verify log
        """
        # Steps in the same list run concurrently: two sleepers + one OOM.
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_sleep')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_sleep')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom')
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_concurrent_sleeps_oom%s' %self.prd)
        self.test_case_scenario.append(test_case_list2)

    def test_03_OOM_multiple_random(self):
        """
        @description Test where multiple active queries randomly hit OOM
        """
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_1')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_mixed_2')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_simple')
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_multiple_random_oom%s' % self.prd)
        self.test_case_scenario.append(test_case_list2)

    # skipping the test for 1.3.1.0 since it takes hours to run this test
    # def test_04_multipleslice_singlequery(self):
    #     """
    #     @description Test where single query with multiple slices per segment runs OOM
    #     """
    #     test_case_list1 = []
    #     test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_multislice_oom')
    #     self.test_case_scenario.append(test_case_list1)
    #
    #     test_case_list2 = []
    #     test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_multislice_oom%s' % self.prd)
    #     self.test_case_scenario.append(test_case_list2)
    #
    #     test_case_list3 = []
    #     test_case_list3.append('resource_management.memory_accounting.scenario.oom_test.runsql.verify.test_oom_count')
    #     self.test_case_scenario.append(test_case_list3)

    def test_05_dumpusage(self):
        """
        @description Test with GUC gp_dump_memory_usage
        """
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_dumpusage')
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.verify.test_dumpusage')
        self.test_case_scenario.append(test_case_list2)

    # QA-2748, need at least 48GB ram to run this test
    # GPDB should use the DCA, for HAWQ should use gpdb26.rel.dh.greenplum.com
    # This test is dependent on how much memory
    @unittest.skip("QA-2748, issue with test on different platform with different memory")
    def test_06_OOM_massivequery(self):
        """
        @description Test where smaller queries pass while the massive violator dies
        """
        # Ten small queries run alongside one massive one; only the
        # massive query should be killed.
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_small')
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_oom_massive')
        self.test_case_scenario.append(test_case_list1)

        test_case_list2 = []
        test_case_list2.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_massive_oom%s' % self.prd)
        self.test_case_scenario.append(test_case_list2)

    def test_07_OOM_abort_query(self):
        """
        @description Need a mechanism to abort query before gp_vmem_protect_limit is hit
        @note Depending on the machine, we may get "VM Protect failed to allocate memory"
        or "Per-query VM protect limit reached: current limit is 102400 kB, requested 8388608 bytes, available 2 MB"
        """
        # Relies on setUp having lowered gp_vmem_limit_per_query to 2MB.
        test_case_list1 = []
        test_case_list1.append('resource_management.memory_accounting.scenario.oom_test.runsql.runtest.test_verify_oom_abort_query')
        self.test_case_scenario.append(test_case_list1)
| apache-2.0 |
abtreece/ansible | lib/ansible/parsing/splitter.py | 80 | 10658 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import codecs
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
# Decode escapes adapted from rspeer's answer here:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
_HEXCHAR = '[a-fA-F0-9]'
_ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\U{0} # 8-digit hex escapes
| \\u{1} # 4-digit hex escapes
| \\x{2} # 2-digit hex escapes
| \\N\{{[^}}]+\}} # Unicode characters by name
| \\[\\'"abfnrtv] # Single-character escapes
)'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
def _decode_escapes(s):
    """Replace backslash escape sequences in ``s`` (\\n, \\t, \\xhh,
    \\uxxxx, \\N{...}, ...) with the characters they denote."""
    def _expand(match):
        return codecs.decode(match.group(0), 'unicode-escape')

    return _ESCAPE_SEQUENCE_RE.sub(_expand, s)
def parse_kv(args, check_raw=False):
    '''
    Convert a string of key/value items to a dict. If any free-form params
    are found and the check_raw option is set to True, they will be added
    to a new parameter called '_raw_params'. If check_raw is not enabled,
    they will simply be ignored.
    '''

    args = to_text(args, nonstring='passthru')

    options = {}
    if args is not None:
        try:
            vargs = split_args(args)
        except ValueError as ve:
            # Re-raise quoting problems with a friendlier message.
            if 'no closing quotation' in str(ve).lower():
                raise AnsibleParserError("error parsing argument string, try quoting the entire line.")
            else:
                raise

        raw_params = []
        for orig_x in vargs:
            x = _decode_escapes(orig_x)
            if "=" in x:
                # Find the first *unescaped* '=', which separates k from v.
                pos = 0
                try:
                    while True:
                        pos = x.index('=', pos + 1)
                        if pos > 0 and x[pos - 1] != '\\':
                            break
                except ValueError:
                    # ran out of string, but we must have some escaped equals,
                    # so replace those and append this to the list of raw params
                    raw_params.append(x.replace('\\=', '='))
                    continue

                k = x[:pos]
                v = x[pos + 1:]

                # FIXME: make the retrieval of this list of shell/command
                # options a function, so the list is centralized
                if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
                    raw_params.append(orig_x)
                else:
                    options[k.strip()] = unquote(v.strip())
            else:
                raw_params.append(orig_x)

        # recombine the free-form params, if any were found, and assign
        # them to a special option for use later by the shell/command module
        if len(raw_params) > 0:
            options[u'_raw_params'] = ' '.join(raw_params)

    return options
def _get_quote_state(token, quote_char):
    """Track quoting state across a token.

    ``quote_char`` is the currently-open quote character (or None).
    Returns the quote character still open after scanning ``token`` —
    None if all quotes were closed — so callers can detect strings
    that were split apart inside quotation marks.  Backslash-escaped
    quotes do not change the state.
    """
    prev_char = None
    for cur_char in token:
        if cur_char in '"\'' and prev_char != '\\':
            if not quote_char:
                # Opening a quoted section.
                quote_char = cur_char
            elif cur_char == quote_char:
                # Closing the currently-open quote (the other quote
                # character is just literal text while inside quotes).
                quote_char = None
        prev_char = cur_char
    return quote_char
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
    """Adjust the jinja2 nesting depth for one token.

    Counts occurrences of ``open_token`` and ``close_token`` in
    ``token`` and applies the difference to ``cur_depth``, clamping the
    result at zero so stray closers cannot drive the depth negative.
    """
    delta = token.count(open_token) - token.count(close_token)
    if delta:
        cur_depth += delta
        if cur_depth < 0:
            cur_depth = 0
    return cur_depth
def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation shlex that has some more intelligence for
    how Ansible needs to use it.
    '''

    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []

    # Initial split on white space
    args = args.strip()
    # Split on newlines first; each line is later split on spaces so we
    # know which separator to restore when rejoining quoted pieces.
    items = args.strip().split('\n')

    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together

    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.

    quote_char = None
    inside_quotes = False
    print_depth = 0  # used to count nested jinja2 {{ }} blocks
    block_depth = 0  # used to count nested jinja2 {% %} blocks
    comment_depth = 0  # used to count nested jinja2 {# #} blocks

    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for itemidx,item in enumerate(items):

        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.strip().split(' ')

        line_continuation = False
        for idx,token in enumerate(tokens):

            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue

            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None

            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False

            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes and not(print_depth or block_depth or comment_depth):
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and was_inside_quotes:
                    # First token of a new line while quoted: no separator.
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    # Rejoin with the space the split removed (except at idx 0).
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    # Single-token line: the removed separator was a newline.
                    params[-1] = "%s\n%s" % (params[-1], token)
                appended = True

            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True

            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True

            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True

            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
                params.append(token)

        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
            params[-1] += '\n'

        # always clear the line continuation flag
        line_continuation = False

    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise AnsibleParserError("failed at splitting arguments, either an unbalanced jinja2 block or quotes: {}".format(args))

    return params
| mit |
brianwoo/django-tutorial | ENV/lib/python2.7/site-packages/pip/_vendor/distlib/compat.py | 203 | 40541 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import absolute_import
import os
import re
import sys
# Python 2 / Python 3 compatibility layer: expose one set of names
# (string_types, text_type, urllib helpers, queue, HTMLParser, ...)
# regardless of which interpreter is running.
if sys.version_info[0] < 3:  # Python 2.x
    from StringIO import StringIO
    string_types = basestring,
    text_type = unicode
    from types import FileType as file_type
    import __builtin__ as builtins
    import ConfigParser as configparser
    from ._backport import shutil
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
                        pathname2url, ContentTooShortError, splittype)
    def quote(s):
        # Python 2's urllib.quote cannot handle unicode input; encode to
        # UTF-8 bytes first, then delegate to the real quote.
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        return _quote(s)
    import urllib2
    from urllib2 import (Request, urlopen, URLError, HTTPError,
                         HTTPBasicAuthHandler, HTTPPasswordMgr,
                         HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
                         build_opener)
    import httplib
    import xmlrpclib
    import Queue as queue
    from HTMLParser import HTMLParser
    import htmlentitydefs
    raw_input = raw_input
    from itertools import ifilter as filter
    from itertools import ifilterfalse as filterfalse
    _userprog = None
    def splituser(host):
        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
        # Lazily compile and cache the pattern on first use.
        global _userprog
        if _userprog is None:
            import re
            _userprog = re.compile('^(.*)@(.*)$')
        match = _userprog.match(host)
        if match: return match.group(1, 2)
        return None, host
else:  # Python 3.x: the same names from their modern locations.
    from io import StringIO
    string_types = str,
    text_type = str
    from io import TextIOWrapper as file_type
    import builtins
    import configparser
    import shutil
    from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
                              unquote, urlsplit, urlunsplit, splittype)
    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
                                pathname2url,
                                HTTPBasicAuthHandler, HTTPPasswordMgr,
                                HTTPSHandler, HTTPHandler, HTTPRedirectHandler,
                                build_opener)
    from urllib.error import HTTPError, URLError, ContentTooShortError
    import http.client as httplib
    import urllib.request as urllib2
    import xmlrpc.client as xmlrpclib
    import queue
    from html.parser import HTMLParser
    import html.entities as htmlentitydefs
    raw_input = input
    from itertools import filterfalse
    filter = filter
try:
    from ssl import match_hostname, CertificateError
except ImportError:
    # Backport of ssl.match_hostname / CertificateError (stdlib since
    # Python 3.2) for interpreters whose ssl module lacks them.
    class CertificateError(ValueError):
        pass
    def _dnsname_match(dn, hostname, max_wildcards=1):
        """Matching according to RFC 6125, section 6.4.3
        http://tools.ietf.org/html/rfc6125#section-6.4.3
        """
        pats = []
        if not dn:
            return False
        # Split the presented identifier into its dot-separated labels.
        parts = dn.split('.')
        leftmost, remainder = parts[0], parts[1:]
        wildcards = leftmost.count('*')
        if wildcards > max_wildcards:
            # Issue #17980: avoid denials of service by refusing more
            # than one wildcard per fragment. A survey of established
            # policy among SSL implementations showed it to be a
            # reasonable choice.
            raise CertificateError(
                "too many wildcards in certificate DNS name: " + repr(dn))
        # speed up common case w/o wildcards
        if not wildcards:
            return dn.lower() == hostname.lower()
        # RFC 6125, section 6.4.3, subitem 1.
        # The client SHOULD NOT attempt to match a presented identifier in which
        # the wildcard character comprises a label other than the left-most label.
        if leftmost == '*':
            # When '*' is a fragment by itself, it matches a non-empty dotless
            # fragment.
            pats.append('[^.]+')
        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
            # RFC 6125, section 6.4.3, subitem 3.
            # The client SHOULD NOT attempt to match a presented identifier
            # where the wildcard character is embedded within an A-label or
            # U-label of an internationalized domain name.
            pats.append(re.escape(leftmost))
        else:
            # Otherwise, '*' matches any dotless string, e.g. www*
            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
        # add the remaining fragments, ignore any wildcards
        for frag in remainder:
            pats.append(re.escape(frag))
        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
        return pat.match(hostname)
    def match_hostname(cert, hostname):
        """Verify that *cert* (in decoded format as returned by
        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
        rules are followed, but IP addresses are not accepted for *hostname*.
        CertificateError is raised on failure. On success, the function
        returns nothing.
        """
        if not cert:
            raise ValueError("empty or no certificate, match_hostname needs a "
                             "SSL socket or SSL context with either "
                             "CERT_OPTIONAL or CERT_REQUIRED")
        dnsnames = []
        # Prefer subjectAltName dNSName entries, per RFC 6125.
        san = cert.get('subjectAltName', ())
        for key, value in san:
            if key == 'DNS':
                if _dnsname_match(value, hostname):
                    return
                dnsnames.append(value)
        if not dnsnames:
            # The subject is only checked when there is no dNSName entry
            # in subjectAltName
            for sub in cert.get('subject', ()):
                for key, value in sub:
                    # XXX according to RFC 2818, the most specific Common Name
                    # must be used.
                    if key == 'commonName':
                        if _dnsname_match(value, hostname):
                            return
                        dnsnames.append(value)
        # No candidate matched: raise with a message listing what was tried.
        if len(dnsnames) > 1:
            raise CertificateError("hostname %r "
                                   "doesn't match either of %s"
                                   % (hostname, ', '.join(map(repr, dnsnames))))
        elif len(dnsnames) == 1:
            raise CertificateError("hostname %r "
                                   "doesn't match %r"
                                   % (hostname, dnsnames[0]))
        else:
            raise CertificateError("no appropriate commonName or "
                                   "subjectAltName fields were found")
try:
    # types.SimpleNamespace exists from Python 3.3 onwards.
    from types import SimpleNamespace as Container
except ImportError:
    class Container(object):
        """
        A generic container for when multiple values need to be returned
        """
        def __init__(self, **kwargs):
            # Store every keyword argument as an attribute of the instance.
            self.__dict__.update(kwargs)
try:
    from shutil import which
except ImportError:
    # Implementation from Python 3.3
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.
        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode)
                    and not os.path.isdir(fn))
        # If we're given a path with a directory part, look it up directly rather
        # than referring to PATH directories. This includes checking relative to the
        # current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd
            return None
        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None
        path = path.split(os.pathsep)
        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if not os.curdir in path:
                path.insert(0, os.curdir)
            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path extensions.
            # This will allow us to short circuit when given "python.exe".
            # If it does match, only test that one, otherwise we have to try
            # others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell you
            # what file suffixes are executable, so just pass on cmd as-is.
            files = [cmd]
        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            # `seen` prevents re-scanning directories listed twice on PATH.
            if not normdir in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name
        return None
# ZipFile is a context manager in 2.7, but not in 2.6
from zipfile import ZipFile as BaseZipFile
if hasattr(BaseZipFile, '__enter__'):
    # Modern stdlib: use ZipFile as-is.
    ZipFile = BaseZipFile
else:
    # Python 2.6: wrap ZipFile (and the file objects it opens) so both can
    # be used in `with` statements.
    from zipfile import ZipExtFile as BaseZipExtFile
    class ZipExtFile(BaseZipExtFile):
        def __init__(self, base):
            # Adopt the state of an already-constructed ZipExtFile.
            self.__dict__.update(base.__dict__)
        def __enter__(self):
            return self
        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate
    class ZipFile(BaseZipFile):
        def __enter__(self):
            return self
        def __exit__(self, *exc_info):
            self.close()
            # return None, so if an exception occurred, it will propagate
        def open(self, *args, **kwargs):
            # Wrap the returned member file so it is a context manager too.
            base = BaseZipFile.open(self, *args, **kwargs)
            return ZipExtFile(base)
try:
    from platform import python_implementation
except ImportError:  # pragma: no cover
    # Fallback for interpreters whose platform module predates
    # python_implementation() (added in Python 2.6).
    def python_implementation():
        """Return a string identifying the Python implementation."""
        version = sys.version
        if 'PyPy' in version:
            return 'PyPy'
        elif os.name == 'java':
            return 'Jython'
        elif version.startswith('IronPython'):
            return 'IronPython'
        else:
            return 'CPython'
try:
    import sysconfig
except ImportError:
    # Pre-2.7/3.2 interpreters: use the bundled backport instead.
    from ._backport import sysconfig
try:
    # callable() was removed in Python 3.0 and restored in 3.2.
    callable = callable
except NameError:
    from collections import Callable
    def callable(obj):
        # Equivalent check: anything implementing __call__ is Callable.
        return isinstance(obj, Callable)
try:
    fsencode = os.fsencode
    fsdecode = os.fsdecode
except AttributeError:
    # os.fsencode/os.fsdecode are Python 3.2+; emulate them using the
    # filesystem encoding with the matching error handler.
    _fsencoding = sys.getfilesystemencoding()
    if _fsencoding == 'mbcs':
        # Windows 'mbcs' codec does not support surrogateescape.
        _fserrors = 'strict'
    else:
        _fserrors = 'surrogateescape'
    def fsencode(filename):
        # bytes pass through unchanged; text is encoded to bytes.
        if isinstance(filename, bytes):
            return filename
        elif isinstance(filename, text_type):
            return filename.encode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
    def fsdecode(filename):
        # text passes through unchanged; bytes are decoded to text.
        if isinstance(filename, text_type):
            return filename
        elif isinstance(filename, bytes):
            return filename.decode(_fsencoding, _fserrors)
        else:
            raise TypeError("expect bytes or str, not %s" %
                            type(filename).__name__)
try:
    from tokenize import detect_encoding
except ImportError:
    # Backport of tokenize.detect_encoding (Python 3) for Python 2.
    from codecs import BOM_UTF8, lookup
    import re
    # NOTE(review): non-raw string relies on \s and \w surviving as regex
    # escapes — works, but a raw string would be safer.
    cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
    def _get_normal_name(orig_enc):
        """Imitates get_normal_name in tokenizer.c."""
        # Only care about the first 12 characters.
        enc = orig_enc[:12].lower().replace("_", "-")
        if enc == "utf-8" or enc.startswith("utf-8-"):
            return "utf-8"
        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
            return "iso-8859-1"
        return orig_enc
    def detect_encoding(readline):
        """
        The detect_encoding() function is used to detect the encoding that should
        be used to decode a Python source file. It requires one argment, readline,
        in the same way as the tokenize() generator.
        It will call readline a maximum of twice, and return the encoding used
        (as a string) and a list of any lines (left as bytes) it has read in.
        It detects the encoding from the presence of a utf-8 bom or an encoding
        cookie as specified in pep-0263. If both a bom and a cookie are present,
        but disagree, a SyntaxError will be raised. If the encoding cookie is an
        invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
        'utf-8-sig' is returned.
        If no encoding is specified, then the default of 'utf-8' will be returned.
        """
        try:
            # Used only to enrich error messages, when available.
            filename = readline.__self__.name
        except AttributeError:
            filename = None
        bom_found = False
        encoding = None
        default = 'utf-8'
        def read_or_stop():
            # Treat end-of-input as an empty line rather than an exception.
            try:
                return readline()
            except StopIteration:
                return b''
        def find_cookie(line):
            try:
                # Decode as UTF-8. Either the line is an encoding declaration,
                # in which case it should be pure ASCII, or it must be UTF-8
                # per default encoding.
                line_string = line.decode('utf-8')
            except UnicodeDecodeError:
                msg = "invalid or missing encoding declaration"
                if filename is not None:
                    msg = '{} for {!r}'.format(msg, filename)
                raise SyntaxError(msg)
            matches = cookie_re.findall(line_string)
            if not matches:
                return None
            encoding = _get_normal_name(matches[0])
            try:
                codec = lookup(encoding)
            except LookupError:
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "unknown encoding: " + encoding
                else:
                    msg = "unknown encoding for {!r}: {}".format(filename,
                                                                 encoding)
                raise SyntaxError(msg)
            if bom_found:
                if codec.name != 'utf-8':
                    # This behaviour mimics the Python interpreter
                    if filename is None:
                        msg = 'encoding problem: utf-8'
                    else:
                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
                    raise SyntaxError(msg)
                encoding += '-sig'
            return encoding
        first = read_or_stop()
        if first.startswith(BOM_UTF8):
            # A UTF-8 BOM pins the default and is stripped from the line.
            bom_found = True
            first = first[3:]
            default = 'utf-8-sig'
        if not first:
            return default, []
        encoding = find_cookie(first)
        if encoding:
            return encoding, [first]
        second = read_or_stop()
        if not second:
            return default, [first]
        encoding = find_cookie(second)
        if encoding:
            return encoding, [first, second]
        return default, [first, second]
# For converting & <-> &amp; etc. (HTML entity escaping/unescaping).
try:
    from html import escape
except ImportError:
    from cgi import escape
if sys.version_info[:2] < (3, 4):
    # html.unescape() only exists from Python 3.4 on.
    unescape = HTMLParser().unescape
else:
    from html import unescape
try:
    from collections import ChainMap
except ImportError:
    # Backport of collections.ChainMap (Python 3.3+).
    from collections import MutableMapping
    try:
        from reprlib import recursive_repr as _recursive_repr
    except ImportError:
        def _recursive_repr(fillvalue='...'):
            '''
            Decorator to make a repr function return fillvalue for a recursive
            call
            '''
            def decorating_function(user_function):
                repr_running = set()
                def wrapper(self):
                    # Key on (object id, thread id) to detect re-entry.
                    key = id(self), get_ident()
                    if key in repr_running:
                        return fillvalue
                    repr_running.add(key)
                    try:
                        result = user_function(self)
                    finally:
                        repr_running.discard(key)
                    return result
                # Can't use functools.wraps() here because of bootstrap issues
                wrapper.__module__ = getattr(user_function, '__module__')
                wrapper.__doc__ = getattr(user_function, '__doc__')
                wrapper.__name__ = getattr(user_function, '__name__')
                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
                return wrapper
            return decorating_function
    class ChainMap(MutableMapping):
        ''' A ChainMap groups multiple dicts (or other mappings) together
        to create a single, updateable view.
        The underlying mappings are stored in a list. That list is public and can
        accessed or updated using the *maps* attribute. There is no other state.
        Lookups search the underlying mappings successively until a key is found.
        In contrast, writes, updates, and deletions only operate on the first
        mapping.
        '''
        def __init__(self, *maps):
            '''Initialize a ChainMap by setting *maps* to the given mappings.
            If no mappings are provided, a single empty dictionary is used.
            '''
            self.maps = list(maps) or [{}]          # always at least one map
        def __missing__(self, key):
            raise KeyError(key)
        def __getitem__(self, key):
            for mapping in self.maps:
                try:
                    return mapping[key]             # can't use 'key in mapping' with defaultdict
                except KeyError:
                    pass
            return self.__missing__(key)            # support subclasses that define __missing__
        def get(self, key, default=None):
            return self[key] if key in self else default
        def __len__(self):
            return len(set().union(*self.maps))     # reuses stored hash values if possible
        def __iter__(self):
            return iter(set().union(*self.maps))
        def __contains__(self, key):
            return any(key in m for m in self.maps)
        def __bool__(self):
            return any(self.maps)
        @_recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))
        @classmethod
        def fromkeys(cls, iterable, *args):
            'Create a ChainMap with a single dict created from the iterable.'
            return cls(dict.fromkeys(iterable, *args))
        def copy(self):
            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
            return self.__class__(self.maps[0].copy(), *self.maps[1:])
        __copy__ = copy
        def new_child(self):                        # like Django's Context.push()
            'New ChainMap with a new dict followed by all previous maps.'
            return self.__class__({}, *self.maps)
        @property
        def parents(self):                          # like Django's Context.pop()
            'New ChainMap from maps[1:].'
            return self.__class__(*self.maps[1:])
        def __setitem__(self, key, value):
            self.maps[0][key] = value
        def __delitem__(self, key):
            try:
                del self.maps[0][key]
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
        def popitem(self):
            'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
            try:
                return self.maps[0].popitem()
            except KeyError:
                raise KeyError('No keys found in the first mapping.')
        def pop(self, key, *args):
            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
            try:
                return self.maps[0].pop(key, *args)
            except KeyError:
                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
        def clear(self):
            'Clear maps[0], leaving maps[1:] intact.'
            self.maps[0].clear()
try:
    from imp import cache_from_source
except ImportError:
    # Crude stand-in: maps 'x.py' to 'x.pyc' or 'x.pyo' depending on the
    # debug flag (no PEP 3147 __pycache__ layout).
    def cache_from_source(path, debug_override=None):
        assert path.endswith('.py')
        if debug_override is None:
            debug_override = __debug__
        if debug_override:
            suffix = 'c'
        else:
            suffix = 'o'
        return path + suffix
try:
    from collections import OrderedDict
except ImportError:
    ## {{{ http://code.activestate.com/recipes/576693/ (r9)
    # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
    # Passes Python2.7's test suite and incorporates all the latest updates.
    try:
        from thread import get_ident as _get_ident
    except ImportError:
        from dummy_thread import get_ident as _get_ident
    try:
        from _abcoll import KeysView, ValuesView, ItemsView
    except ImportError:
        # View classes are optional; view* methods will fail without them.
        pass
    class OrderedDict(dict):
        'Dictionary that remembers insertion order'
        # An inherited dict maps keys to values.
        # The inherited dict provides __getitem__, __len__, __contains__, and get.
        # The remaining methods are order-aware.
        # Big-O running times for all methods are the same as for regular dictionaries.
        # The internal self.__map dictionary maps keys to links in a doubly linked list.
        # The circular doubly linked list starts and ends with a sentinel element.
        # The sentinel element never gets deleted (this simplifies the algorithm).
        # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
        def __init__(self, *args, **kwds):
            '''Initialize an ordered dictionary.  Signature is the same as for
            regular dictionaries, but keyword arguments are not recommended
            because their insertion order is arbitrary.
            '''
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                self.__root
            except AttributeError:
                self.__root = root = []                     # sentinel node
                root[:] = [root, root, None]
                self.__map = {}
            self.__update(*args, **kwds)
        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
            'od.__setitem__(i, y) <==> od[i]=y'
            # Setting a new item creates a new link which goes at the end of the linked
            # list, and the inherited dictionary is updated with the new key/value pair.
            if key not in self:
                root = self.__root
                last = root[0]
                last[1] = root[0] = self.__map[key] = [last, root, key]
            dict_setitem(self, key, value)
        def __delitem__(self, key, dict_delitem=dict.__delitem__):
            'od.__delitem__(y) <==> del od[y]'
            # Deleting an existing item uses self.__map to find the link which is
            # then removed by updating the links in the predecessor and successor nodes.
            dict_delitem(self, key)
            link_prev, link_next, key = self.__map.pop(key)
            link_prev[1] = link_next
            link_next[0] = link_prev
        def __iter__(self):
            'od.__iter__() <==> iter(od)'
            # Walk the linked list forward, yielding keys in insertion order.
            root = self.__root
            curr = root[1]
            while curr is not root:
                yield curr[2]
                curr = curr[1]
        def __reversed__(self):
            'od.__reversed__() <==> reversed(od)'
            # Walk the linked list backward.
            root = self.__root
            curr = root[0]
            while curr is not root:
                yield curr[2]
                curr = curr[0]
        def clear(self):
            'od.clear() -> None.  Remove all items from od.'
            try:
                # Break the link cycles so the garbage collector has less work.
                for node in self.__map.itervalues():
                    del node[:]
                root = self.__root
                root[:] = [root, root, None]
                self.__map.clear()
            except AttributeError:
                pass
            dict.clear(self)
        def popitem(self, last=True):
            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
            Pairs are returned in LIFO order if last is true or FIFO order if false.
            '''
            if not self:
                raise KeyError('dictionary is empty')
            root = self.__root
            if last:
                # Unlink the node just before the sentinel (most recent).
                link = root[0]
                link_prev = link[0]
                link_prev[1] = root
                root[0] = link_prev
            else:
                # Unlink the node just after the sentinel (oldest).
                link = root[1]
                link_next = link[1]
                root[1] = link_next
                link_next[0] = root
            key = link[2]
            del self.__map[key]
            value = dict.pop(self, key)
            return key, value
        # -- the following methods do not depend on the internal structure --
        def keys(self):
            'od.keys() -> list of keys in od'
            return list(self)
        def values(self):
            'od.values() -> list of values in od'
            return [self[key] for key in self]
        def items(self):
            'od.items() -> list of (key, value) pairs in od'
            return [(key, self[key]) for key in self]
        def iterkeys(self):
            'od.iterkeys() -> an iterator over the keys in od'
            return iter(self)
        def itervalues(self):
            'od.itervalues -> an iterator over the values in od'
            for k in self:
                yield self[k]
        def iteritems(self):
            'od.iteritems -> an iterator over the (key, value) items in od'
            for k in self:
                yield (k, self[k])
        def update(*args, **kwds):
            '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
            If E is a dict instance, does:           for k in E: od[k] = E[k]
            If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
            Or if E is an iterable of items, does:   for k, v in E: od[k] = v
            In either case, this is followed by:     for k, v in F.items(): od[k] = v
            '''
            # self is taken positionally so a 'self' keyword argument still works.
            if len(args) > 2:
                raise TypeError('update() takes at most 2 positional '
                                'arguments (%d given)' % (len(args),))
            elif not args:
                raise TypeError('update() takes at least 1 argument (0 given)')
            self = args[0]
            # Make progressively weaker assumptions about "other"
            other = ()
            if len(args) == 2:
                other = args[1]
            if isinstance(other, dict):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, 'keys'):
                for key in other.keys():
                    self[key] = other[key]
            else:
                for key, value in other:
                    self[key] = value
            for key, value in kwds.items():
                self[key] = value
        __update = update  # let subclasses override update without breaking __init__
        __marker = object()
        def pop(self, key, default=__marker):
            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
            If key is not found, d is returned if given, otherwise KeyError is raised.
            '''
            if key in self:
                result = self[key]
                del self[key]
                return result
            if default is self.__marker:
                raise KeyError(key)
            return default
        def setdefault(self, key, default=None):
            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
            if key in self:
                return self[key]
            self[key] = default
            return default
        def __repr__(self, _repr_running=None):
            'od.__repr__() <==> repr(od)'
            # Guard against infinite recursion for self-referencing dicts.
            if not _repr_running: _repr_running = {}
            call_key = id(self), _get_ident()
            if call_key in _repr_running:
                return '...'
            _repr_running[call_key] = 1
            try:
                if not self:
                    return '%s()' % (self.__class__.__name__,)
                return '%s(%r)' % (self.__class__.__name__, self.items())
            finally:
                del _repr_running[call_key]
        def __reduce__(self):
            'Return state information for pickling'
            items = [[k, self[k]] for k in self]
            inst_dict = vars(self).copy()
            for k in vars(OrderedDict()):
                inst_dict.pop(k, None)
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def copy(self):
            'od.copy() -> a shallow copy of od'
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
            and values equal to v (which defaults to None).
            '''
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
            while comparison to a regular mapping is order-insensitive.
            '''
            if isinstance(other, OrderedDict):
                return len(self)==len(other) and self.items() == other.items()
            return dict.__eq__(self, other)
        def __ne__(self, other):
            return not self == other
        # -- the following methods are only used in Python 2.7 --
        def viewkeys(self):
            "od.viewkeys() -> a set-like object providing a view on od's keys"
            return KeysView(self)
        def viewvalues(self):
            "od.viewvalues() -> an object providing a view on od's values"
            return ValuesView(self)
        def viewitems(self):
            "od.viewitems() -> a set-like object providing a view on od's items"
            return ItemsView(self)
try:
    from logging.config import BaseConfigurator, valid_ident
except ImportError:
    # Backport of the dictConfig machinery from logging.config (Python 2.7 /
    # 3.2+): valid_ident plus the Converting* wrappers and BaseConfigurator.
    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
    def valid_ident(s):
        # True if *s* is a valid Python identifier, else raise ValueError.
        m = IDENTIFIER.match(s)
        if not m:
            raise ValueError('Not a valid Python identifier: %r' % s)
        return True
    # The ConvertingXXX classes are wrappers around standard Python containers,
    # and they serve to convert any suitable values in the container. The
    # conversion converts base dicts, lists and tuples to their wrapped
    # equivalents, whereas strings which match a conversion format are converted
    # appropriately.
    #
    # Each wrapper should have a configurator attribute holding the actual
    # configurator to use for conversion.
    class ConvertingDict(dict):
        """A converting dictionary wrapper."""
        def __getitem__(self, key):
            value = dict.__getitem__(self, key)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
        def get(self, key, default=None):
            value = dict.get(self, key, default)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
        def pop(self, key, default=None):
            value = dict.pop(self, key, default)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
    class ConvertingList(list):
        """A converting list wrapper."""
        def __getitem__(self, key):
            value = list.__getitem__(self, key)
            result = self.configurator.convert(value)
            #If the converted value is different, save for next time
            if value is not result:
                self[key] = result
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
        def pop(self, idx=-1):
            value = list.pop(self, idx)
            result = self.configurator.convert(value)
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
            return result
    class ConvertingTuple(tuple):
        """A converting tuple wrapper."""
        def __getitem__(self, key):
            value = tuple.__getitem__(self, key)
            result = self.configurator.convert(value)
            # Tuples are immutable, so the converted value is not written back.
            if value is not result:
                if type(result) in (ConvertingDict, ConvertingList,
                                    ConvertingTuple):
                    result.parent = self
                    result.key = key
            return result
    class BaseConfigurator(object):
        """
        The configurator base class which defines some useful defaults.
        """
        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
        DIGIT_PATTERN = re.compile(r'^\d+$')
        # Maps protocol prefixes ('ext://...', 'cfg://...') to converter names.
        value_converters = {
            'ext' : 'ext_convert',
            'cfg' : 'cfg_convert',
        }
        # We might want to use a different one, e.g. importlib
        importer = staticmethod(__import__)
        def __init__(self, config):
            self.config = ConvertingDict(config)
            self.config.configurator = self
        def resolve(self, s):
            """
            Resolve strings to objects using standard import and attribute
            syntax.
            """
            name = s.split('.')
            used = name.pop(0)
            try:
                found = self.importer(used)
                for frag in name:
                    used += '.' + frag
                    try:
                        found = getattr(found, frag)
                    except AttributeError:
                        # Attribute missing: try importing the submodule first.
                        self.importer(used)
                        found = getattr(found, frag)
                return found
            except ImportError:
                e, tb = sys.exc_info()[1:]
                v = ValueError('Cannot resolve %r: %s' % (s, e))
                v.__cause__, v.__traceback__ = e, tb
                raise v
        def ext_convert(self, value):
            """Default converter for the ext:// protocol."""
            return self.resolve(value)
        def cfg_convert(self, value):
            """Default converter for the cfg:// protocol."""
            rest = value
            m = self.WORD_PATTERN.match(rest)
            if m is None:
                raise ValueError("Unable to convert %r" % value)
            else:
                rest = rest[m.end():]
                d = self.config[m.groups()[0]]
                #print d, rest
                while rest:
                    m = self.DOT_PATTERN.match(rest)
                    if m:
                        d = d[m.groups()[0]]
                    else:
                        m = self.INDEX_PATTERN.match(rest)
                        if m:
                            idx = m.groups()[0]
                            if not self.DIGIT_PATTERN.match(idx):
                                d = d[idx]
                            else:
                                try:
                                    n = int(idx) # try as number first (most likely)
                                    d = d[n]
                                except TypeError:
                                    d = d[idx]
                    if m:
                        rest = rest[m.end():]
                    else:
                        raise ValueError('Unable to convert '
                                         '%r at %r' % (value, rest))
            #rest should be empty
            return d
        def convert(self, value):
            """
            Convert values to an appropriate type. dicts, lists and tuples are
            replaced by their converting alternatives. Strings are checked to
            see if they have a conversion format and are converted if they do.
            """
            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
                value = ConvertingDict(value)
                value.configurator = self
            elif not isinstance(value, ConvertingList) and isinstance(value, list):
                value = ConvertingList(value)
                value.configurator = self
            elif not isinstance(value, ConvertingTuple) and\
                     isinstance(value, tuple):
                value = ConvertingTuple(value)
                value.configurator = self
            elif isinstance(value, string_types):
                m = self.CONVERT_PATTERN.match(value)
                if m:
                    d = m.groupdict()
                    prefix = d['prefix']
                    converter = self.value_converters.get(prefix, None)
                    if converter:
                        suffix = d['suffix']
                        converter = getattr(self, converter)
                        value = converter(suffix)
            return value
        def configure_custom(self, config):
            """Configure an object with a user-supplied factory."""
            c = config.pop('()')
            if not callable(c):
                c = self.resolve(c)
            props = config.pop('.', None)
            # Check for valid identifiers
            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
            result = c(**kwargs)
            if props:
                for name, value in props.items():
                    setattr(result, name, value)
            return result
        def as_tuple(self, value):
            """Utility function which converts lists to tuples."""
            if isinstance(value, list):
                value = tuple(value)
            return value
| gpl-3.0 |
biddyweb/merchant | billing/tests/test_amazon_fps.py | 3 | 1677 | from xml.dom import minidom
try:
from urlparse import urlparse, parse_qsl
except ImportError:
from urllib.parse import urlparse, parse_qsl
from django.conf import settings
from django.test import TestCase
from django.template import Template, Context
from django.utils.unittest import skipIf
from billing import get_integration
@skipIf(not settings.MERCHANT_SETTINGS.get("amazon_fps", None), "gateway not configured")
class AmazonFPSTestCase(TestCase):
    """Checks that the Amazon FPS integration renders a valid co-branded
    UI pipeline link containing the configured fields."""
    urls = "billing.tests.test_urls"

    def setUp(self):
        # The integration under test, primed with the minimal field set the
        # FPS co-branded UI pipeline requires.
        self.fps = get_integration("amazon_fps")
        self.fields = {
            "callerReference": "100",
            "paymentReason": "Digital Download",
            "pipelineName": "SingleUse",
            "transactionAmount": '30',
            "returnURL": "http://localhost/fps/fps-return-url/",
        }
        self.fps.add_fields(self.fields)

    def testLinkGen(self):
        # Render the template tag, then pull the generated anchor's href out
        # of the resulting markup.
        rendered = Template(
            "{% load render_integration from billing_tags %}"
            "{% render_integration obj %}"
        ).render(Context({"obj": self.fps}))
        document = minidom.parseString(rendered)
        href = document.getElementsByTagName('a')[0].attributes['href'].value
        parts = urlparse(href)
        params = dict(parse_qsl(parts.query))
        # The link must target the Amazon FPS sandbox pipeline endpoint ...
        self.assertEquals(parts.scheme, 'https')
        self.assertEquals(parts.netloc, 'authorize.payments-sandbox.amazon.com')
        self.assertEquals(parts.path, '/cobranded-ui/actions/start')
        # ... and carry all configured fields plus the caller's access key.
        self.assertDictContainsSubset(self.fields, params)
        self.assertEquals(params['callerKey'], settings.MERCHANT_SETTINGS['amazon_fps']['AWS_ACCESS_KEY'])
| bsd-3-clause |
TristanCavelier/notesntools | python/path_normalize.py | 1 | 2998 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Tristan Cavelier <t.cavelier@free.fr>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def normalize(path):
  """ normalize(path)

  Returns a normalized version of `path`, resolving ".." and "." parts. It
  removes all useless "/" but keeps a trailing one if the input had it.

  As in POSIX pathname resolution, the root is its own parent, so surplus
  ".." parts of an absolute path are dropped: normalize("/../a") == "/a".
  (Fixes the previous implementation, which produced "..//a" here, breaking
  the "/../path/to/here" self-check below.)

  Examples:

    # normalize path and remove trailing slashes
    path = normalize(path + "/.")

    # normalize path in a chroot
    realpath = CHROOT_REALPATH + normalize("/" + path)
  """
  # Trivial inputs first, so the generic code below can assume at least one
  # separator or real component.
  if path == "" or path == ".": return "."
  if path == "..": return ".."
  is_absolute = path.startswith("/")
  keep_trailing_slash = path.endswith("/")
  parts = []
  for part in path.split("/"):
    if part == "" or part == ".":
      continue  # "" comes from doubled "/"; "." is a no-op
    if part == "..":
      if parts and parts[-1] != "..":
        parts.pop()  # ".." cancels the preceding real component
      elif not is_absolute:
        # Relative paths keep surplus ".."; absolute paths drop them
        # because "/.." resolves to "/".
        parts.append("..")
    else:
      parts.append(part)
  if is_absolute:
    result = "/" + "/".join(parts)
    if keep_trailing_slash and parts:
      result += "/"
    return result
  result = "/".join(parts)
  if result == "":
    # Everything cancelled out: the current directory.
    return "./" if keep_trailing_slash else "."
  if keep_trailing_slash:
    result += "/"
  return result
######################################################################
# Tests
# prints if failure
def test(a, b):
    """Print "<a> != <b>" when the actual value differs from the expected."""
    if a == b:
        return
    print(a + " != " + b)
# Table-driven checks: each pair is (input path, expected normalization);
# test() prints a message on any mismatch.
_CASES = (
    ("", "."),
    (".", "."),
    ("./", "./"),
    ("..", ".."),
    ("../", "../"),
    ("ab/..", "."),
    ("ab/../", "./"),
    ("./ab", "ab"),
    ("./ab/", "ab/"),
    ("ab", "ab"),
    ("ab/", "ab/"),
    ("/", "/"),
    ("/path/to/here", "/path/to/here"),
    ("/path/to/here/", "/path/to/here/"),
    ("//path//to//here//", "/path/to/here/"),
    ("/path/to/../here", "/path/here"),
    ("/path/to/../here/..", "/path"),
    ("/path/to/../here/../", "/path/"),
    ("/path/to/../../here/..", "/"),
    ("/path/to/../../here", "/here"),
    ("/path/to/../../../here", "/here"),
    ("/path/to/..../here/..", "/path/to/...."),
    ("/path/.b/here/a./..", "/path/.b/here"),
    ("/path/b./here/.a/..", "/path/b./here"),
    ("/../path/to/here", "/path/to/here"),
    ("../path/to/here", "../path/to/here"),
    ("path/../../to/here", "../to/here"),
)
for _path, _expected in _CASES:
    test(normalize(_path), _expected)
| mit |
isyippee/ceilometer | ceilometer/storage/base.py | 6 | 8946 | #
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for storage engines
"""
import datetime
import inspect
import math
from oslo_utils import timeutils
import six
from six import moves
import ceilometer
def iter_period(start, end, period):
    """Split a time from start to end in periods of a number of seconds.

    This function yields the (start, end) time for each period composing the
    time passed as argument.

    :param start: When the period set start (datetime).
    :param end: When the period end starts (datetime).
    :param period: The duration of the period, in seconds.
    """
    # timeutils.delta_seconds(start, end) is just the signed difference in
    # seconds; computing it directly drops the six/oslo_utils dependency and
    # behaves identically on Python 2 and 3 (range is fine for this count).
    total_seconds = (end - start).total_seconds()
    count = int(math.ceil(total_seconds / float(period)))
    increment = datetime.timedelta(seconds=period)
    period_start = start
    for _ in range(count):
        period_end = period_start + increment
        yield (period_start, period_end)
        period_start = period_end
def _handle_sort_key(model_name, sort_key=None):
"""Generate sort keys according to the passed in sort key from user.
:param model_name: Database model name be query.(alarm, meter, etc.)
:param sort_key: sort key passed from user.
return: sort keys list
"""
sort_keys_extra = {'alarm': ['name', 'user_id', 'project_id'],
'meter': ['user_id', 'project_id'],
'resource': ['user_id', 'project_id', 'timestamp'],
}
sort_keys = sort_keys_extra[model_name]
if not sort_key:
return sort_keys
# NOTE(Fengqian): We need to put the sort key from user
# in the first place of sort keys list.
try:
sort_keys.remove(sort_key)
except ValueError:
pass
finally:
sort_keys.insert(0, sort_key)
return sort_keys
class MultipleResultsFound(Exception):
    # Storage-layer error: a lookup that expected one result matched several.
    pass
class NoResultFound(Exception):
    # Storage-layer error: a lookup matched no result at all.
    pass
class Model(object):
    """Base class for storage API models.

    Attributes are passed as keyword arguments; their names are remembered
    in ``self.fields`` so the instance can be serialized back into a plain
    dict with :meth:`as_dict`.
    """

    def __init__(self, **kwds):
        # Preserve the attribute names so as_dict() knows what to export.
        self.fields = list(kwds)
        # kwds.items() works on both Python 2 and 3; no need for six here.
        for k, v in kwds.items():
            setattr(self, k, v)

    def as_dict(self):
        """Return the model as a plain dict, recursing into nested models."""
        d = {}
        for f in self.fields:
            v = getattr(self, f)
            if isinstance(v, Model):
                v = v.as_dict()
            elif isinstance(v, list) and v and isinstance(v[0], Model):
                v = [sub.as_dict() for sub in v]
            d[f] = v
        return d

    def __eq__(self, other):
        # Fix: comparing against a non-Model used to raise AttributeError
        # (no as_dict on the other operand); returning NotImplemented lets
        # Python fall back to its default comparison instead.
        if not isinstance(other, Model):
            return NotImplemented
        return self.as_dict() == other.as_dict()

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep != consistent with ==.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    @classmethod
    def get_field_names(cls):
        """Return the constructor's positional parameter names (minus self)."""
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec when available, keep getargspec for Python 2.
        argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        fields = argspec(cls.__init__)[0]
        return set(fields) - set(["self"])
class Connection(object):
    """Base class for storage system connections.

    Concrete drivers subclass this and override the methods they support;
    every query method here raises the project's NotImplementedError so the
    API layer can report an unsupported feature cleanly.
    """

    # A dictionary representing the capabilities of this driver.
    # Drivers override entries for the query features they actually support.
    CAPABILITIES = {
        'meters': {'query': {'simple': False,
                             'metadata': False,
                             'complex': False}},
        'resources': {'query': {'simple': False,
                                'metadata': False,
                                'complex': False}},
        'samples': {'query': {'simple': False,
                              'metadata': False,
                              'complex': False}},
        'statistics': {'groupby': False,
                       'query': {'simple': False,
                                 'metadata': False,
                                 'complex': False},
                       'aggregation': {'standard': False,
                                       'selectable': {
                                           'max': False,
                                           'min': False,
                                           'sum': False,
                                           'avg': False,
                                           'count': False,
                                           'stddev': False,
                                           'cardinality': False}}
                       },
    }

    # Operational capabilities (as opposed to query features) of the backend.
    STORAGE_CAPABILITIES = {
        'storage': {'production_ready': False},
    }

    def __init__(self, url):
        # Subclasses parse the connection URL; the base class keeps no state.
        pass

    @staticmethod
    def upgrade():
        """Migrate the database to `version` or the most recent version."""

    @staticmethod
    def record_metering_data(data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter

        All timestamps must be naive utc datetime object.
        """
        raise ceilometer.NotImplementedError(
            'Recording metering data is not implemented')

    @staticmethod
    def clear_expired_metering_data(ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.

        :param ttl: Number of seconds to keep records for.
        """
        raise ceilometer.NotImplementedError(
            'Clearing samples not implemented')

    @staticmethod
    def get_resources(user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, limit=None):
        """Return an iterable of models.Resource instances.

        Iterable items containing resource information.
        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional timestamp start range operation.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional timestamp end range operation.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Resources not implemented')

    @staticmethod
    def get_meters(user=None, project=None, resource=None, source=None,
                   metaquery=None, limit=None):
        """Return an iterable of model.Meter instances.

        Iterable items containing meter information.
        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Meters not implemented')

    @staticmethod
    def get_samples(sample_filter, limit=None):
        """Return an iterable of model.Sample instances.

        :param sample_filter: Filter.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Samples not implemented')

    @staticmethod
    def get_meter_statistics(sample_filter, period=None, groupby=None,
                             aggregate=None):
        """Return an iterable of model.Statistics instances.

        The filter must have a meter value set.
        """
        raise ceilometer.NotImplementedError('Statistics not implemented')

    @staticmethod
    def clear():
        """Clear database."""

    @staticmethod
    def query_samples(filter_expr=None, orderby=None, limit=None):
        """Return an iterable of model.Sample objects.

        :param filter_expr: Filter expression for query.
        :param orderby: List of field name and direction pairs for order by.
        :param limit: Maximum number of results to return.
        """
        raise ceilometer.NotImplementedError('Complex query for samples '
                                             'is not implemented.')

    @classmethod
    def get_capabilities(cls):
        """Return an dictionary with the capabilities of each driver."""
        return cls.CAPABILITIES

    @classmethod
    def get_storage_capabilities(cls):
        """Return a dictionary representing the performance capabilities.

        This is needed to evaluate the performance of each driver.
        """
        return cls.STORAGE_CAPABILITIES
| apache-2.0 |
gramps-project/gramps | gramps/plugins/export/test/exportvcard_test.py | 6 | 13923 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Michiel D. Nauta
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Unittest for export to VCard
"""
import unittest
import time
import subprocess
import sys
import os
import xml.etree.ElementTree as ET
from ...lib.libgrampsxml import GRAMPS_XML_VERSION
from gramps.version import VERSION
from ..exportvcard import VCardWriter
class VCardCheck(unittest.TestCase):
    """Round-trip tests for the VCard exporter.

    Each test builds a small Gramps XML database in memory, pipes it through
    ``Gramps.py -i - -f gramps -e - -f vcf`` in a subprocess, and compares the
    emitted VCard text line-by-line against an expected list.
    """

    def setUp(self):
        # Expected VCard output for a minimal person with only a surname;
        # individual tests mutate specific entries before calling do_case().
        self.expect = ["BEGIN:VCARD", "VERSION:3.0",
                       "PRODID:-//Gramps//NONSGML Gramps %s//EN" % VERSION,
                       "FN:Lastname", "N:Lastname;;;;",
                       "SORT-STRING:" + "Lastname".ljust(55), "END:VCARD"]
        date = time.localtime(time.time())
        # NOTE(review): input_list appears unused by the tests below.
        self.input_list = ["BEGIN:VCARD", "VERSION:3.0", "FN:Lastname",
                           "N:Lastname;;;;", "END:VCARD"]
        self.header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE database PUBLIC "-//GRAMPS//DTD GRAMPS XML %s//EN"
"http://gramps-project.org/xml/%s/grampsxml.dtd">""" % \
            (GRAMPS_XML_VERSION, GRAMPS_XML_VERSION)
        strng = """<database xmlns="http://gramps-project.org/xml/%s/">
<header>
<created date="%04d-%02d-%02d" version="%s"/>
<researcher/>
</header>
<people>
<person id="I0000" handle="_0000">
<name type="Birth Name">
<surname>Lastname</surname>
</name>
</person>
</people>
</database>""" % \
            (GRAMPS_XML_VERSION, date[0], date[1], date[2], VERSION)
        namespace = "http://gramps-project.org/xml/%s/" % GRAMPS_XML_VERSION
        ET.register_namespace("", namespace)
        self.database = ET.XML(strng)
        # Handy references into the XML tree for tests to tweak in place.
        self.people = self.database[1]
        self.person = self.people[0]
        self.name = self.person[0]
        self.lastname = self.name[0]

    def do_case(self, input_doc, expect_str, debug=False):
        """Feed input_doc through Gramps import/export and compare output.

        :param input_doc: ElementTree root of the Gramps XML database.
        :param expect_str: list of expected VCard lines (joined below).
        :param debug: when True, dump the exchanged data to stdout.
        """
        if debug:
            print(ET.tostring(input_doc))
        gcmd = [sys.executable, 'Gramps.py',
                '-i', '-', '-f', 'gramps',
                '-e', '-', '-f', 'vcf']
        process = subprocess.Popen(gcmd,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=os.environ)
        input_str = (self.header.encode('utf-8') +
                     ET.tostring(input_doc, encoding='utf-8'))
        result_str, err_str = process.communicate(input_str)
        # VCard lines end in CR plus the platform line separator; the export
        # also terminates with an extra blank line.
        separator = '\r' + os.linesep
        expect_str = separator.join(expect_str) + (separator * 2)
        if debug:
            print(err_str)
            print(result_str)
            print(expect_str)
        self.assertEqual(result_str, expect_str.encode('utf-8'))

    def test_base(self):
        self.do_case(self.database,
                     self.expect)

    def test_esc_string_none(self):
        self.assertEqual(VCardWriter.esc("nothing"), "nothing")

    def test_esc_string_all(self):
        self.assertEqual(VCardWriter.esc("backslash\\_comma,_semicolon;"),
                         "backslash\\\\_comma\\,_semicolon\\;")

    def test_esc_string_list(self):
        self.assertEqual(VCardWriter.esc(["comma,", "semicolon;"]),
                         ["comma\\,", "semicolon\\;"])

    def test_esc_string_tuple(self):
        self.assertEqual(VCardWriter.esc(("comma,", "semicolon;")),
                         ("comma\\,", "semicolon\\;"))

    def test_esc_string_wrongtype(self):
        self.assertRaises(TypeError, VCardWriter.esc,
                          {"comma,":"semicolon;"})

    def test_write_formatted_name_title(self):
        ET.SubElement(self.name, "title").text = 'Sir.'
        self.expect[3] = "FN:Sir. Lastname"
        self.expect[4] = "N:Lastname;;;Sir.;"
        self.do_case(self.database, self.expect)

    def test_write_name_multiple_surname(self):
        self.lastname.text = "Oranje"
        self.lastname.set("prefix", "van")
        ET.SubElement(self.name, "surname").text = "Nassau"
        self.expect[3] = "FN:van Oranje Nassau"
        self.expect[4] = "N:van Oranje,Nassau;;;;"
        self.expect[5] = "SORT-STRING:" + "Oranje".ljust(55)
        self.do_case(self.database, self.expect)

    def test_write_name_callname(self):
        # callname not in first names!
        ET.SubElement(self.name, "first").text = "B C"
        ET.SubElement(self.name, "call").text = "A"
        self.expect[3] = "FN:B C Lastname"
        self.expect[4] = "N:Lastname;A;B,C;;"
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "B C".ljust(30)
        self.do_case(self.database, self.expect)

    def test_write_name_callname_in_addnames(self):
        ET.SubElement(self.name, "first").text = "A B C"
        ET.SubElement(self.name, "call").text = "B"
        self.expect[3] = "FN:A B C Lastname"
        self.expect[4] = "N:Lastname;B;A,C;;"
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A B C".ljust(30)
        self.do_case(self.database, self.expect)

    def test_write_name_no_callname(self):
        ET.SubElement(self.name, "first").text = "A B C"
        self.expect[3] = "FN:A B C Lastname"
        self.expect[4] = "N:Lastname;A;B,C;;"
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A B C".ljust(30)
        self.do_case(self.database, self.expect)

    def test_write_name_no_additional_names(self):
        ET.SubElement(self.name, "first").text = "A"
        self.expect[3] = "FN:A Lastname"
        self.expect[4] = "N:Lastname;A;;;"
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(25) + "A".ljust(30)
        self.do_case(self.database, self.expect)

    def test_write_name_honprefix(self):
        ET.SubElement(self.name, "title").text = "Sir"
        self.expect[3] = "FN:Sir Lastname"
        self.expect[4] = "N:Lastname;;;Sir;"
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(55)
        self.do_case(self.database, self.expect)

    def test_write_name_honsuffix(self):
        ET.SubElement(self.name, "suffix").text = "Jr."
        self.expect[3] = "FN:Lastname\\, Jr."
        self.expect[4] = "N:Lastname;;;;Jr."
        self.expect[5] = "SORT-STRING:" + "Lastname".ljust(55)+ "Jr."
        self.do_case(self.database, self.expect)

    def test_nicknames_regular(self):
        attribs = {'type': 'Birth Name', 'alt': '1'}
        name = ET.SubElement(self.person, "name", attrib=attribs)
        ET.SubElement(name, 'nick').text = 'Nick'
        name = ET.SubElement(self.person, "name", attrib=attribs)
        ET.SubElement(name, 'nick').text = 'N.'
        self.expect.insert(6, "NICKNAME:Nick,N.")
        self.do_case(self.database, self.expect)

    def test_nicknames_primary_nick(self):
        ET.SubElement(self.name, 'nick').text = 'Nick'
        attribs = {'type': 'Birth Name', 'alt': '1'}
        name = ET.SubElement(self.person, "name", attrib=attribs)
        ET.SubElement(name, 'nick').text = 'N.'
        self.expect.insert(6, "NICKNAME:Nick,N.")
        self.do_case(self.database, self.expect)

    def test_write_birthdate_regular(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Birth'
        ET.SubElement(event, 'dateval', val='2001-02-28')
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.expect.insert(6, "BDAY:2001-02-28")
        self.do_case(self.database, self.expect)

    def test_write_birthdate_empty(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Birth'
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.do_case(self.database, self.expect)

    def test_write_birhtdate_textonly(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Birth'
        ET.SubElement(event, 'dateval', val='Christmas 2001')
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.do_case(self.database, self.expect)

    def test_write_birthdate_span(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Birth'
        attribs = {'start': '2001-02-28', 'stop': '2002-02-28'}
        ET.SubElement(event, 'datespan', attrib=attribs)
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.do_case(self.database, self.expect)

    def test_write_birthdate_range(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Birth'
        attribs = {'start': '2001-02-28', 'stop': '2002-02-28'}
        ET.SubElement(event, 'daterange', attrib=attribs)
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.do_case(self.database, self.expect)

    def test_write_addresses_regular(self):
        address = ET.SubElement(self.person, 'address')
        ET.SubElement(address, 'street').text = 'pobox bis street'
        ET.SubElement(address, 'city').text = 'place'
        ET.SubElement(address, 'country').text = 'country'
        ET.SubElement(address, 'state').text = 'province'
        ET.SubElement(address, 'postal').text = 'zip'
        self.expect.insert(6, "ADR:;;pobox bis street;place;province;zip;country")
        self.do_case(self.database, self.expect)

    def test_write_addresses_phone(self):
        address = ET.SubElement(self.person, 'address')
        ET.SubElement(address, 'phone').text = '01234-56789'
        self.expect.insert(6, "TEL:01234-56789")
        self.do_case(self.database, self.expect)

    def test_write_urls_email(self):
        attribs = {'type': 'E-mail', 'href': 'me@example.com'}
        ET.SubElement(self.person, 'url', attrib=attribs)
        self.expect.insert(6, "EMAIL:me@example.com")
        self.do_case(self.database, self.expect)

    def test_write_urls_emial_mailto(self):
        attribs = {'type': 'E-mail', 'href': 'mailto:me@example.com'}
        ET.SubElement(self.person, 'url', attrib=attribs)
        self.expect.insert(6, "EMAIL:me@example.com")
        self.do_case(self.database, self.expect)

    def test_write_urls_url(self):
        attribs = {'type': 'Web Home', 'href': 'http://www.example.org'}
        ET.SubElement(self.person, 'url', attrib=attribs)
        self.expect.insert(6, "URL:http://www.example.org")
        self.do_case(self.database, self.expect)

    def test_write_occupation_regular(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Occupation'
        ET.SubElement(event, 'description').text = 'carpenter'
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.expect.insert(6, "ROLE:carpenter")
        self.do_case(self.database, self.expect)

    def test_write_occupation_lastdate(self):
        events = ET.Element('events')
        self.database.insert(1, events)
        attribs = {'handle': '_e0000', 'id': 'E0000'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Occupation'
        ET.SubElement(event, 'dateval', val='2011-02-28')
        ET.SubElement(event, 'description').text = 'foreman'
        attribs = {'handle': '_e0001', 'id': 'E0001'}
        event = ET.SubElement(events, 'event', attrib=attribs)
        ET.SubElement(event, 'type').text = 'Occupation'
        ET.SubElement(event, 'dateval', val='2000-09-21')
        ET.SubElement(event, 'description').text = 'carpenter'
        attribs = {'hlink': '_e0000', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        attribs = {'hlink': '_e0001', 'role': 'Primary'}
        ET.SubElement(self.person, 'eventref', attrib=attribs)
        self.expect.insert(6, "ROLE:foreman")
        self.do_case(self.database, self.expect)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| gpl-2.0 |
curtislisle/PolarWebApps | aws-antarctic-tangelo/service/getdata.py | 2 | 2183 |
import bson.json_util
import pymongo
import json
from bson import ObjectId
from pymongo import Connection
import string
import tangelo
def convertStringToFloatPoint(lng, lat):
    """Convert coordinate strings with a hemisphere suffix to signed floats.

    :param lng: longitude string such as "160E" or "30W" (the last character
        is assumed to always be the hemisphere letter -- TODO confirm every
        record carries a suffix).
    :param lat: latitude string such as "75S" or "10N".
    :return: dict with float ``lat`` and ``lng`` keys; south and west are
        negative.
    :raises ValueError: if the numeric portion cannot be parsed as a float.
    """
    # Fix: removed the Python 2-only debug `print` statement, which broke
    # the module under Python 3 and spammed stdout for every record.
    lat_value = float(lat[:-1])
    if lat.endswith("S"):
        lat_value = -lat_value
    lng_value = float(lng[:-1])
    if lng.endswith("W"):
        lng_value = -lng_value
    return {"lat": lat_value, "lng": lng_value}
def run(tablename):
    """Return all documents of MongoDB collection `tablename` as JSON.

    Reads every record from the 'polar' database, converts the string
    station coordinates (e.g. "160E"/"75S") into signed floats stored under
    'lat'/'lng', and returns a JSON document with 'data', 'count' and
    'result' keys.  Records whose coordinates cannot be parsed are skipped.
    """
    response = {}
    print("using collection:", tablename)
    # No filters are currently supported, so the query matches everything.
    querystring = {}
    print("query to perform:", querystring)
    # Fix: the original opened two connections (the first one, to the unused
    # 'ivaan' database, was never closed -- a connection leak); a single
    # connection, closed even on error, is enough.
    connection = Connection('localhost', 27017)
    try:
        dataset_collection = connection['polar'][tablename]
        # Materialize the cursor once instead of re-counting it afterwards.
        results = [x for x in dataset_collection.find(querystring)]
    finally:
        connection.close()
    # Pack the results into the response object, and return it.
    response['data'] = []
    for record in results:
        try:
            point = convertStringToFloatPoint(record['stationLng'],
                                              record['stationLat'])
            record['lng'] = point['lng']
            record['lat'] = point['lat']
            response['data'].append(record)
        except (KeyError, ValueError):
            # Skip records with missing or malformed coordinate fields.
            pass
    response['count'] = len(response['data'])
    response['result'] = 'OK'
    # Return the response object.
    tangelo.log(str(response))
    return bson.json_util.dumps(response)
| apache-2.0 |
ashwoods/django-allauth | allauth/socialaccount/south_migrations/0007_auto__add_field_socialapp_client_id.py | 78 | 6438 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration.

    Adds the ``client_id`` column to ``socialaccount_socialapp``.  The
    ``models`` dict below is a frozen South snapshot of the ORM at generation
    time and is not meant to be edited by hand.
    """

    def forwards(self, orm):
        # Adding field 'SocialApp.client_id'
        db.add_column('socialaccount_socialapp', 'client_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'SocialApp.client_id'
        db.delete_column('socialaccount_socialapp', 'client_id')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 3, 966915)'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 12, 22, 12, 51, 3, 966743)'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'socialaccount.socialaccount': {
            'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'SocialAccount'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_data': ('allauth.socialaccount.fields.JSONField', [], {'default': "'{}'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'socialaccount.socialapp': {
            'Meta': {'object_name': 'SocialApp'},
            'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'socialaccount.socialtoken': {
            'Meta': {'unique_together': "(('app', 'account'),)", 'object_name': 'SocialToken'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialAccount']"}),
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['socialaccount.SocialApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'token_secret': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'})
        }
    }

    complete_apps = ['socialaccount']
| mit |
BhaskarNewase/NodeApp | node_modules/node-gyp/gyp/pylib/gyp/input.py | 713 | 115880 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = [
  'executable',
  'shared_library',
  'loadable_module',
  'mac_kernel_extension',
]

# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']

# base_path_sections is a list of sections defined by GYP that contain
# pathnames.  The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Populated at load time from base_path_sections plus generator-provided keys.
path_sections = set()

# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if |section| holds pathnames."""
  # Trailing '=+?!' merge markers apply a section to its base name, so drop
  # them before matching.  '/' is deliberately not handled: a regexp section
  # can never be treated as a path.
  section = section.rstrip('=+?!')
  if section in path_sections:
    return True
  # Sections matching the regexp '_(dir|file|path)s?$' also count as path
  # sections.  Plain string tests are used instead of a regexp because this
  # can be called hundreds of thousands of times, so speed matters.
  if '_' not in section:
    return False
  if section.endswith('s'):
    section = section[:-1]
  return section.endswith(('_file', '_path', '_dir'))
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations.  It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',

  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Filled in at load time: base_non_configuration_keys plus generator keys.
non_configuration_keys = []

# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]

# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False

# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly.  Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file.  Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers.  It
  is used for recursion.

  The returned list will not contain any duplicate entries.  Each build file
  in the list will be relative to the current directory.
  """
  # Use an "is None" identity test (not ==) for the recursion sentinel; the
  # list default is created fresh per top-level call, avoiding the shared
  # mutable-default pitfall.
  if included is None:
    included = []

  # A file already in the accumulator has been visited; stop to keep the
  # result duplicate-free and to terminate on include cycles.
  if build_file_path in included:
    return included

  included.append(build_file_path)

  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)

  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.

  Note that this is slower than eval() is.
  """
  # Parse with the Python 2 'compiler' module so the file's expression can
  # be validated node-by-node without executing arbitrary code.
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  # The file must consist of exactly one expression statement (a Discard
  # node wrapping the top-level dict literal).
  assert isinstance(c2[0], Discard)
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # Walk the expression, enforcing the dict/list/constant-only restriction.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively convert one 'compiler' AST node from a gyp file to a value.

  Only Dict, List, and Const nodes are permitted.  A repeated dictionary
  key raises GypError; any other node type raises TypeError.  keypath is
  the list of keys/indices leading to this node, used in error messages.
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Named 'result' rather than 'dict' so the builtin isn't shadowed.
    result = {}
    # Children alternate key-node, value-node; step through them in pairs.
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy of the list for descending this node.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
is_target, check):
if build_file_path in data:
return data[build_file_path]
if os.path.exists(build_file_path):
build_file_contents = open(build_file_path).read()
else:
raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
build_file_data = None
try:
if check:
build_file_data = CheckedEval(build_file_contents)
else:
build_file_data = eval(build_file_contents, {'__builtins__': None},
None)
except SyntaxError, e:
e.filename = build_file_path
raise
except Exception, e:
gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
raise
if type(build_file_data) is not dict:
raise GypError("%s does not evaluate to a dictionary." % build_file_path)
data[build_file_path] = build_file_data
aux_data[build_file_path] = {}
# Scan for includes and merge them in.
if ('skip_includes' not in build_file_data or
not build_file_data['skip_includes']):
try:
if is_target:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, includes, check)
else:
LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
aux_data, None, check)
except Exception, e:
gyp.common.ExceptionAppend(e,
'while reading includes of ' + build_file_path)
raise
return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge every file named by subdict's 'includes' key into subdict.

  includes (a list or None) supplies additional files to merge beyond those
  listed in subdict['includes'].  subdict is modified in place; the
  'includes' key is consumed.  Recurses into nested dicts and lists so
  includes at any depth are honored.
  """
  includes_list = []
  if includes != None:
    includes_list.extend(includes)
  if 'includes' in subdict:
    for include in subdict['includes']:
      # "include" is specified relative to subdict_path, so compute the real
      # path to include by appending the provided "include" to the directory
      # in which subdict_path resides.
      relative_include = \
          os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
      includes_list.append(relative_include)
    # Unhook the includes list, it's no longer needed.
    del subdict['includes']

  # Merge in the included files.
  for include in includes_list:
    if not 'included' in aux_data[subdict_path]:
      aux_data[subdict_path]['included'] = []
    aux_data[subdict_path]['included'].append(include)

    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)

    # The included file is loaded with includes=None so its own 'includes'
    # key (if any) is handled by its own recursion, then merged over subdict.
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries.
  for k, v in subdict.iteritems():
    if type(v) is dict:
      LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
                                    None, check)
    elif type(v) is list:
      LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
                                    check)
# This recurses into lists so that it can look for dicts.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  """Descend through a list looking for dicts whose includes need merging.

  Lists themselves carry no 'includes' key; this only recurses through
  list nesting so that any dict found at any depth is processed.
  """
  for element in sublist:
    if type(element) is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
      continue
    if type(element) is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Processes toolsets in all the targets. This recurses into condition entries
# since they can contain toolsets as well.
def ProcessToolsetsInDict(data):
  """Expand each target in data['targets'] into one copy per toolset.

  A target with a 'toolsets' list is replaced by one target dict per listed
  toolset, each carrying a scalar 'toolset' key instead.  When the global
  multiple_toolsets is False, only the 'target' toolset is produced.  Also
  recurses into 'conditions' entries, since conditional blocks may define
  toolsets as well.  data is modified in place.
  """
  if 'targets' in data:
    target_list = data['targets']
    new_target_list = []
    for target in target_list:
      # If this target already has an explicit 'toolset', and no 'toolsets'
      # list, don't modify it further.
      if 'toolset' in target and 'toolsets' not in target:
        new_target_list.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Make sure this 'toolsets' definition is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if len(toolsets) > 0:
        # Optimization: only do copies if more than one toolset is specified.
        for build in toolsets[1:]:
          new_target = gyp.simple_copy.deepcopy(target)
          new_target['toolset'] = build
          new_target_list.append(new_target)
        # The original dict is reused for the first toolset rather than
        # copied.
        target['toolset'] = toolsets[0]
        new_target_list.append(target)
    data['targets'] = new_target_list
  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        # condition[0] is the expression; the dicts that follow may contain
        # their own 'targets'/'toolsets'.
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name. It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load a build file expected to contain targets, and prepare its dict.

  Performs DEPTH setup, include merging, toolset expansion, "early" variable
  and condition processing, target_defaults merging, and dependency
  discovery.  When load_dependencies is true, dependencies are loaded
  recursively and False is returned for an already-loaded file; otherwise
  returns (build_file_path, dependencies) for the caller to schedule.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files
  # in the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether
  # the |build_file_path| has already been loaded, because the 'scheduled'
  # set in ParallelState guarantees that we never load the same
  # |build_file_path| twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
          build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
            e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

  This wrapper is used when LoadTargetBuildFile is executed in
  a worker process.  Returns (build_file_path, build_file_data,
  dependencies) for the pipe back to the main process, or None on error.
  """

  try:
    # Ignore SIGINT in the worker; the parent process handles Ctrl-C and
    # tears down the pool.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to
    # keep it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    # Returning None (rather than re-raising) signals failure to the
    # callback in the main process.
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised when something goes wrong while processing input files in
  parallel."""
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    Runs on the main process (invoked by multiprocessing's result handler);
    all state mutation happens under self.condition, which is also used to
    wake the scheduling loop in LoadTargetBuildFilesParallel.
    """
    self.condition.acquire()
    if not result:
      # The worker returned None: record the failure and wake the main loop
      # so it can bail out.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    for new_dependency in dependencies0:
      # Only queue dependencies never seen before; 'scheduled' guarantees
      # each build file is loaded at most once.
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load build_files and all of their dependencies using a process pool.

  Farms each not-yet-scheduled build file out to CallLoadTargetBuildFile in
  a worker process; results are folded back into data by
  ParallelState.LoadTargetBuildFileCallback.  Exits the program (sys.exit)
  if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing to schedule right now; wait for a callback to deliver
        # newly discovered dependencies or to drain 'pending'.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Snapshot of the module globals each worker must replicate to behave
      # like the main process.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    # NOTE(review): if no job was ever scheduled, parallel_state.pool may
    # still be None here and terminate() would raise -- confirm whether
    # that window is reachable in practice.
    parallel_state.pool.terminate()
    raise e

  # NOTE(review): the release is not in a finally block, so an exception
  # other than KeyboardInterrupt escaping the loop would leave the
  # condition held -- verify this is acceptable before restructuring.
  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Find the bracket that matches the first bracket seen in a string and
# report the span as a (start, end) tuple.  For example, given
# "<(foo <(bar)) blah", the result is (1, 13): everything except the
# leading "<" and the trailing " blah".
LBRACKETS = set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}


def FindEnclosingBracketGroup(input_str):
  """Locate the first bracketed group in input_str.

  Returns (start, end) such that input_str[start:end] runs from the first
  opening bracket through its matching closer, or (-1, -1) when brackets
  are absent, unbalanced, or mismatched.
  """
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      open_stack.append(ch)
      if group_start == -1:
        group_start = pos
      continue
    if ch in BRACKETS:
      # A closer with nothing open, or of the wrong kind, means failure.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        # The first opener just got matched: the group is complete.
        return (group_start, pos + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  """
  # Hand-rolled rather than a regexp: this is called a lot, and manual
  # string tests measure roughly twice as fast here.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  digits = string[1:] if string[0] == "-" else string
  if not digits:
    # A bare "-" is not an integer.
    return False
  # No leading zeros allowed (so "00" and "-0" are rejected), and every
  # character must be a digit.
  if '1' <= digits[0] <= '9':
    return digits.isdigit()
  return False
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the full reference, 'type' the leading
# symbol plus modifiers, 'command_string' an optional interpreter name,
# 'is_array' a '[' when the content is a list literal, 'content' the
# variable name or command text.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.  Keyed on (command string, directory); see ExpandVariables.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Apply platform-specific workarounds to a command.

  On win32, a leading "cat " is rewritten to "type ".  On every other
  platform cmd is returned unchanged.  cmd may be a string or an
  argv-style list; its type is preserved.
  """
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    # Only the first element can carry the leading "cat ".
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases, selecting which reference style ExpandVariables handles
# and which conditions section ProcessConditionsInDict consumes.
PHASE_EARLY = 0     # '<' references; 'conditions' sections.
PHASE_LATE = 1      # '>' references; 'target_conditions' sections.
PHASE_LATELATE = 2  # '^' references; no conditions processing.
def ExpandVariables(input, phase, variables, build_file):
  """Expand GYP variable and command references found in |input|.

  phase selects the reference style handled: PHASE_EARLY expands '<'
  references, PHASE_LATE expands '>', and PHASE_LATELATE expands '^'.
  variables maps variable names to values; build_file is the path used to
  resolve relative paths and to label error messages.

  Returns the expanded value: an int when the result is a canonical
  integer string, a list for '@' (list-context) expansions, otherwise a
  str.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      # Work on a deep copy so filter processing cannot alter the caller's
      # variables dict.
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file
      # lists generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      # WriteOnDiff avoids touching the file (and invalidating timestamps)
      # when the contents haven't changed.
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For
          # modules that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:
            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              # NOTE(review): this message lacks a space between
              # "pymod_do_main" and "module" -- fixing it would change a
              # runtime string, so it is only flagged here.
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            # Always restore the working directory, even when DoMain raises.
            os.chdir(oldwd)
          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          try:
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
          except Exception, e:
            raise GypError("%s while executing command '%s' in %s" %
                           (e, contents, build_file))

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d while in %s." %
                           (contents, p.returncode, build_file))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents,build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # an empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it.  Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
      raise GypError('Variable ' + contents +
                     ' must expand to a string or list of strings; ' +
                     'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context.  It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement.
      # See above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct.  This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into
  # integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
# Maps condition-expression strings to code objects compiled by
# EvalSingleCondition.
cached_conditions_asts = {}
def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  condition is a flat list of alternating (cond_expr, true_dict) pairs,
  optionally followed by a trailing false_dict; the first expression that
  selects a dict wins.  conditions_key names the section ('conditions' or
  'target_conditions') for error messages.  Raises GypError on malformed
  input.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
          conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
            conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    # Identity test (is None, not ==) guards against dicts that compare
    # equal to None; also ensures only the first matching pair is evaluated.
    if result is None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
            'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    # Compile each distinct expression only once; identical conditions are
    # evaluated many times across targets.
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with empty __builtins__ and the gyp variables as locals, so
    # conditions can reference variables by bare name.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    # Re-raise with the build file and offset folded into the message.
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    # An unknown bare name in the condition: report which condition and file.
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Processes a 'conditions' or 'target_conditions' section in the_dict.

  Which key is honored depends on phase:
    early -> conditions
    late -> target_conditions
    latelate -> no conditions

  Each item in a conditions list consists of cond_expr, a string expression
  evaluated as the condition, and true_dict, a dict that will be merged into
  the_dict if cond_expr evaluates to true.  Optionally, a third item,
  false_dict, may be present.  false_dict is merged into the_dict if
  cond_expr evaluates to false.

  Any dict merged into the_dict will be recursively processed for nested
  conditionals and other expansions, also according to phase, immediately
  prior to being merged.
  """
  if phase == PHASE_EARLY:
    conditions_key = 'conditions'
  elif phase == PHASE_LATE:
    conditions_key = 'target_conditions'
  elif phase == PHASE_LATELATE:
    return
  else:
    assert False

  if conditions_key not in the_dict:
    return

  conditions_list = the_dict[conditions_key]
  # Unhook the conditions list, it's no longer needed.
  del the_dict[conditions_key]

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)

    # 'is not None' instead of '!= None': identity is the correct (and
    # cheaper) test for the None sentinel.
    if merge_dict is not None:
      # Expand variables and nested conditionals in the merge_dict before
      # merging it.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)

      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Loads "automatic" variables from the_dict into variables.

  Any keys with plain string, int, or list values in the_dict become
  automatic variables.  The variable name is the key name with a "_"
  character prepended.  Dict-valued (and any other) entries are skipped.
  """
  # .items() rather than the Python-2-only .iteritems(): identical behavior
  # on Python 2, and keeps the function working on Python 3.
  for key, value in the_dict.items():
    if type(value) in (str, int, list):
      variables['_' + key] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Loads entries of the_dict's "variables" sub-dict into variables.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key that accesses
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  for key, value in the_dict.get('variables', {}).items():
    # Only plain values may become variables; nested dicts are handled by
    # the recursive processing elsewhere.
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Use '==', not 'is': comparing strings by identity only works by the
      # accident of CPython small-string interning.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.
  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function.
  Arguments:
    the_dict: dict to process in place.
    phase: one of the PHASE_* constants; selects which conditions key is
        honored and is passed through to ExpandVariables.
    variables_in: variable bindings inherited from the enclosing scope;
        copied, never mutated.
    build_file: path of the build file the_dict came from, used for error
        messages and relative resolution.
    the_dict_key: key under which the_dict is stored in its parent dict, or
        None (see LoadVariablesFromVariablesDict).
  """
  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value
    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Expand string values in place.  Assigning the_dict[key] below only
  # replaces values for existing keys (no keys are added or removed), which
  # is what makes mutating while iterating acceptable here.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
              'Variable expansion in this context permits str and int ' + \
              'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      the_dict[key] = expanded
  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within a
  # "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.
  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)
  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      # Ints pass through untouched; anything else is unsupported input.
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Expands variables and processes conditions in each item of the_list.

  String items are variable-expanded in place (a string that expands to a
  list is spliced into the_list), dict and list items are processed
  recursively, and int items pass through untouched.  Any other item type
  raises TypeError.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # str(index): concatenating the int index directly would raise an
        # unrelated TypeError and mask the intended error message.
        raise ValueError(
            'Variable expansion in this context permits strings and ' +
            'lists only, found ' + expanded.__class__.__name__ + ' at ' +
            str(index))
    elif type(item) is not int:
      raise TypeError('Unknown type ' + item.__class__.__name__ +
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.
  |data| maps build file pathnames (relative to the current directory) to
  loaded build file contents.  Every entry listed under a build file's
  "targets" key is indexed by its fully-qualified name, formed from the
  build file pathname plus the target's "target_name" and "toolset".
  Raises GypError if two targets resolve to the same qualified name.
  """
  by_qualified_name = {}
  for build_file in data['target_build_files']:
    build_file_dict = data[build_file]
    for target_dict in build_file_dict.get('targets', []):
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target_dict['target_name'],
                                                  target_dict['toolset'])
      if qualified_name in by_qualified_name:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      by_qualified_name[qualified_name] = target_dict
  return by_qualified_name
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.
  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.
  Raises GypError when a non-"dependencies" section references a target that
  is absent from the "dependencies" list.
  """
  # Cover the '!' (exclusion) and '/' (regex) variants of every dependency
  # section as well as the plain form.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.items():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # enumerate instead of xrange(0, len(...)): same index-based rewrite,
      # clearer and Python-3 compatible.
      for index, dep in enumerate(dependencies):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dep, toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies"
        # also appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.
  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.
  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.
  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.
  Raises GypError when a wildcard refers back to the target's own build file.
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard. Keep it moving.
          index = index + 1
          continue
        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')
        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1
        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.  Matching targets are inserted in the order they appear
        # in that file's "targets" list.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int(...) coercion: "suppress_wildcard" may be a string such as
          # '1' coming straight from a build file.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          index = index + 1
          dependencies.insert(index, dependency)
        index = index + 1
def Unify(l):
  """Removes duplicate elements from l, keeping the first element."""
  seen = {}
  unique = []
  for element in l:
    if element not in seen:
      # Record the first occurrence; later equal elements are dropped.
      seen[element] = element
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's
  dependency lists.  The first occurrence of each dependency is kept."""
  # .items() instead of the Python-2-only .iteritems(); behavior unchanged.
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        target_dict[dependency_key] = Unify(dependencies)
def Filter(l, item):
  """Returns a new list with every element of l that is not equal to item."""
  # A plain comprehension replaces the previous dict.setdefault trick, which
  # allocated a throwaway dict and, as a side effect, canonicalized equal
  # elements to the first-seen object -- neither implied by the contract.
  return [e for e in l if e != item]
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the
  prune_self_dependency variable set."""
  for target_name, target_dict in targets.items():
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          # Only a dependency on the target itself, and only when the target
          # opts in via prune_self_dependency, is filtered out.
          if t == target_name and \
             targets[t].get('variables', {}).get('prune_self_dependency', 0):
            target_dict[dependency_key] = Filter(dependencies, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the
  'none' targets."""
  for target_name, target_dict in targets.items():
    # The target's type never changes inside the loops below; hoist the
    # 'none' check out instead of re-evaluating it per dependency.
    if target_dict.get('type', None) != 'none':
      continue
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      if dependencies:
        for t in dependencies:
          if targets[t].get('variables', {}).get('link_dependency', 0):
            target_dict[dependency_key] = \
                Filter(target_dict[dependency_key], t)
class DependencyGraphNode(object):
  """A node in the target (or build file) dependency graph.
  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
        Typically a qualified target name or a build file path; the root
        node of a graph uses ref=None.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """
  class CircularException(GypError):
    # Raised when a dependency cycle is detected in the graph.
    pass
  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []
  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref
  def FlattenToList(self):
    """Topologically sorts the graph rooted at this node.
    Returns a list of refs with every node after all of its dependencies.
    Nodes involved in a cycle never reach in-degree zero and are silently
    omitted; callers compare the result length against the node count to
    detect cycles.
    """
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()
    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])
    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)
      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          if not node_dependent_dependency.ref in flat_list:
            # The dependent one or more dependencies not in flat_list.  There
            # will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break
        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)
    return list(flat_list)
  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    Each cycle is reported as a node path that starts and ends at the node
    where the cycle was closed.
    """
    results = []
    visited = set()
    # Depth-first walk along 'dependents' edges; |path| is the current
    # chain from the recursion root down to |node|.
    def Visit(node, path):
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif not child in visited:
          visited.add(child)
          Visit(child, [child] + path)
    visited.add(self)
    Visit(self, [self])
    return results
  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies.
    |dependencies| may be an existing list to append into; a new list is
    created when it is None.
    """
    if dependencies == None:
      dependencies = []
    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref != None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)
    return dependencies
  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.
    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.
    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    if dependencies == None:
      dependencies = []
    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1
    return dependencies
  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)
  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()
    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        # Recurse first so that deeper dependencies precede this one in the
        # resulting ordered set.
        dependency.DeepDependencies(dependencies)
        dependencies.add(dependency.ref)
    return dependencies
  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.
    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.
    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.
    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()
    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies
    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.
    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")
    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])
    target_type = targets[self.ref]['type']
    is_linkable = target_type in linkable_types
    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies
    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies
    # Executables, mac kernel extensions and loadable modules are already fully
    # and finally linked.  Nothing else can be a link dependency of them, there
    # can only be dependencies in the sense that a dependent target might run
    # an executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module',
                                       'mac_kernel_extension'):
      return dependencies
    # Shared libraries are already fully linked.  They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies
    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)
    return dependencies
  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """
    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)
  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and a flattened ordering of it.
  Returns a two-item list: a dict mapping qualified target names to their
  DependencyGraphNode, and a flat list of target names in which every target
  appears after all of its dependencies.  Raises
  DependencyGraphNode.CircularException if the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target, spec in targets.items():
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are
  # treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.items():
    target_node = dependency_nodes[target]
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
    else:
      for dependency in dependencies:
        dependency_node = dependency_nodes.get(dependency)
        if not dependency_node:
          raise GypError("Dependency '%s' not found while "
                         "trying to load target %s" % (dependency, target))
        target_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      # next(iter(...)) works on both Python 2 and 3, unlike keys()[0].
      target = next(iter(targets))
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp-file-level dependency graph is acyclic.
  Builds a graph whose nodes are build files (a file may refer back to
  itself) and raises DependencyGraphNode.CircularException when the build
  files form a dependency cycle among themselves.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets:
    build_file = gyp.common.BuildFile(target)
    if build_file not in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.items():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      except GypError as e:
        # 'as e' (not the Python-2-only ', e') keeps this parseable on
        # Python 3.
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Spelling fixed: previously raised "Dependancy ... not found".
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.values():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      # next(iter(...)) works on both Python 2 and 3, unlike values()[0].
      file_node = next(iter(dependency_nodes.values()))
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges dependency-exported settings dicts into dependent targets.
  key selects which settings flow along which edges and should be one of
  'all_dependent_settings', 'direct_dependent_settings', or 'link_settings';
  any other value raises GypError.  flat_list must be dependency-ordered so
  settings propagate transitively in a single pass.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the dependency set matching the propagation rule for |key|.
    if key == 'all_dependent_settings':
      dependencies = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dependencies = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dependency in dependencies:
      dependency_dict = targets[dependency]
      if key not in dependency_dict:
        continue
      dependency_build_file = gyp.common.BuildFile(dependency)
      MergeDicts(target_dict, dependency_dict[key],
                 build_file, dependency_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Recomputes target "dependencies" properties for linking.

  For each static library target, removes "dependencies" entries referring to
  other static libraries, unless the dependency has the "hard_dependency"
  attribute set.  For each linkable target, adds a "dependencies" entry
  referring to all of the target's computed list of link dependencies
  (including static libraries) if no such entry is already present.  The
  pre-adjustment list is preserved under "dependencies_original".
  """
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']
    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue
      # Keep a copy of the unmodified list for generators that need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]
      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]
        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1
      # Update the dependencies. If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']
    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.
      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        if dependency == target:
          # Never record a self-dependency.
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to dependencies.
      # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative, which applies it to
# every path item it sees.  It matches items that must pass through
# unmodified: an optional opening quote followed by one of - / $ < > ^
# (see the comments in MakePathRelative for what each prefix means).
exception_re = re.compile(r'''["']?[-/$<>^]''')
def MakePathRelative(to_file, fro_file, item):
  """Rebases the path |item| from |fro_file|'s directory to |to_file|'s.

  If item is a relative path, it's relative to the build file dict that it's
  coming from.  Fix it up to make it relative to the build file dict that
  it's going into.

  Exception: any |item| that begins with these special characters is
  returned without modification:
    /   Used when a path is already absolute (shortcut optimization;
        such paths would be returned as absolute anyway)
    $   Used for build environment variables
    -   Used for some build environment flags (such as -lapr-1 in a
        "libraries" section)
    <   Used for our own variable and command expansions (see ExpandVariables)
    >   Used for our own variable and command expansions (see ExpandVariables)
    ^   Used for our own variable and command expansions (see ExpandVariables)

    "/' Used when a value is quoted.  If these are present, then we
        check the second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # normpath strips a trailing slash; restore it when the input had one.
    # endswith() (rather than item[-1]) avoids an IndexError on an empty
    # item.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges the list |fro| into the list |to|, in place.

  Items are appended by default, or prepended (preserving |fro|'s order)
  when append=False.  Strings and ints are copied directly; dicts and lists
  are copied recursively via MergeDicts/MergeLists.  When is_paths is true,
  string items are rebased from |fro_file|'s directory to |to_file|'s with
  MakePathRelative.  Strings not beginning with '-' are treated as
  "singletons" that may appear only once in |to|.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__
  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l
  prepend_index = 0
  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item
      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)
    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)
      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges the dict |fro| into the dict |to|, in place.

  Scalars overwrite; dicts merge recursively; lists honor the merge policy
  encoded in the from-key's final character ('=' replace, '+' prepend,
  '?' set-only-if-absent, none append).  Values in path sections (per
  IsPathSection) are rebased from |fro_file|'s directory to |to_file|'s.
  Raises TypeError when value types are incompatible and GypError when
  conflicting list policies appear together in |fro|.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True
      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...     applied when receiving the from-list:
      #                           =      replace
      #                           +      prepend
      #                           ?      set, only if to-list does not yet exist
      #                      (none)      append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']
      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)
      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []
      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively merges |configuration| into |new_configuration_dict|.

  Parents named in 'inherit_from' are merged first so the configuration's
  own settings take precedence.  |visited| guards against inheritance
  cycles; the 'abstract' marker is never propagated to the result.
  """
  if configuration in visited:
    # Already merged on this inheritance path; stop to avoid a cycle.
    return
  config_dict = target_dict['configurations'][configuration]
  # Merge parents before this configuration so its own values win.
  for parent in config_dict.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])
  MergeDicts(new_configuration_dict, config_dict, build_file, build_file)
  # 'abstract' only marks inheritance-only configurations; drop it.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Builds the concrete 'configurations' dicts for |target| in place.

  Each non-abstract configuration inherits the target-scope settings
  (everything not listed in non_configuration_keys), then its
  'inherit_from' parents via MergeConfigWithInheritance.  Abstract
  configurations are removed, keys that moved into configurations are
  deleted from the target scope, and invalid_configuration_keys cause a
  GypError.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']
  build_file = gyp.common.BuildFile(target)
  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically-first concrete configuration as the default.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]
  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])
    merged_configurations[configuration] = new_configuration_dict
  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
      merged_configurations[configuration])
  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]
  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]
  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".

  Arguments:
    name: name of |the_dict|, used only in error messages.
    the_dict: dict to process; modified in place, and recursed into.
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue
    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)
    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue
    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])
    if not list_key in lists:
      lists.append(list_key)
  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]
  for list_key in lists:
    the_list = the_dict[list_key]
    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))
    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0
      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]
    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)
        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)
        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value
      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]
    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)
    excluded_list = []
    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]
    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list
  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Recurses into |the_list|, applying list filters to any dicts found."""
  for element in the_list:
    element_type = type(element)
    if element_type is dict:
      ProcessListFiltersInDict(name, element)
    elif element_type is list:
      ProcessListFiltersInList(name, element)
def ValidateTargetType(target, target_dict):
  """Checks that target_dict['type'] names one of the known target types.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.
  Raises GypError for an unknown type, or when standalone_static_library
  is set on a non-static_library target.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'mac_kernel_extension', 'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  standalone = target_dict.get('standalone_static_library', 0)
  if standalone and target_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """For static libraries, rejects compiled sources that share a basename.

  Mac libtool cannot archive two objects with the same basename.  The check
  is skipped entirely when |duplicate_basename_check| is false or the target
  is not a static_library.  Raises GypError on duplicates.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return
  compiled_exts = ['.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
  basenames = {}
  for source in target_dict.get('sources', []):
    name, ext = os.path.splitext(source)
    if ext not in compiled_exts:
      # Only compiled files produce objects, so only they can collide.
      continue
    # Key on the basename without its extension so foo.c and sub/foo.cc
    # are detected as a collision.
    basenames.setdefault(os.path.basename(name), []).append(source)
  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))
  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Track rule names and extensions already claimed, so duplicates among the
  # target's rules can be rejected.
  seen_names = {}
  seen_extensions = {}
  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in seen_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    seen_names[rule_name] = rule
    # Extensions are compared without a leading dot.
    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in seen_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      seen_extensions[rule_extension]['rule_name'],
                      rule_name))
    seen_extensions[rule_extension] = rule
    # rule_sources is an output of this function; refuse to clobber one
    # supplied as input.
    if 'rule_sources' in rule:
      raise GypError(
        'rule_sources must not exist in input, target %s rule %s' %
        (target, rule_name))
    # Collect every source (from 'sources' plus any generator-specific
    # lists) whose extension matches this rule.
    matches = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        source_extension = os.path.splitext(source)[1]
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          matches.append(source)
    if matches:
      rule['rule_sources'] = matches
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Validates any 'run_as' section in |target_dict|.

  When present, 'run_as' must be a dict containing a list-valued 'action';
  'working_directory' (string) and 'environment' (dict) are optional.
  Raises GypError describing the first problem found.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # No run_as section; nothing to validate.
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Every action needs an 'action_name' and an 'inputs' list; an 'action'
  command, when given, must not start with an empty element.  Raises
  GypError on the first violation.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s. "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.

  Both values and keys are converted; nested dicts and lists are processed
  in place.
  """
  # Snapshot the items with list() so that deleting and reinserting
  # stringified keys below cannot disturb the iteration.  On Python 2,
  # items() already returns a list; on Python 3 it is a live view and
  # mutating the dict while iterating it is an error.
  for k, v in list(the_dict.items()):
    if type(v) is int:
      v = str(v)
      the_dict[k] = v
    elif type(v) is dict:
      TurnIntIntoStrInDict(v)
    elif type(v) is list:
      TurnIntIntoStrInList(v)
    if type(k) is int:
      del the_dict[k]
      the_dict[str(k)] = v
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.

  The list is modified in place; nested dicts and lists are processed
  recursively.
  """
  # enumerate replaces the index-based xrange loop: it behaves identically
  # here and, unlike xrange, exists on both Python 2 and Python 3.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|.

  Returns a (wanted_targets, wanted_flat_list) pair and also prunes the
  per-build-file 'targets' lists inside |data| to match.  Raises GypError
  if a root target cannot be resolved in |flat_list|.
  """
  qualified_root_targets = []
  for target in root_targets:
    target = target.strip()
    qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
    if not qualified_targets:
      raise GypError("Could not find target %s" % target)
    qualified_root_targets.extend(qualified_targets)
  # Keep every root target plus everything it transitively depends on.
  wanted_targets = {}
  for target in qualified_root_targets:
    wanted_targets[target] = targets[target]
    for dependency in dependency_nodes[target].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]
  # Preserve flat_list's ordering while filtering it.
  wanted_flat_list = [t for t in flat_list if t in wanted_targets]
  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    new_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        new_targets.append(target)
    data[build_file]['targets'] = new_targets
  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.
  """
  # Maps 'subdirectory:target_name' to the .gyp file that first claimed it.
  seen = {}
  for target in targets:
    # 'path/to/file.gyp:target_name' -> 'path/to/file.gyp', 'target_name'.
    path, name = target.rsplit(':', 1)
    # 'path/to/file.gyp' -> 'path/to', 'file.gyp'.
    subdir, gyp_file = os.path.split(path)
    # An empty subdir means the current directory; spell it '.' so the error
    # messages make more sense.
    subdir = subdir or '.'
    key = subdir + ':' + name
    if key in seen:
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, seen[key]))
    seen[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Seeds this module's generator-dependent globals from
  |generator_input_info|: path_sections, non_configuration_keys,
  multiple_toolsets and generator_filelist_paths.
  """
  # Set up path_sections and non_configuration_keys with the default data plus
  # the generator-specific data.
  global path_sections
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])
  global non_configuration_keys
  # Copy the base list so repeated calls don't accumulate entries.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
  global multiple_toolsets
  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  global generator_filelist_paths
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads the build files in |build_files| and everything they reach.

  This is the main entry point of this module.  It drives, in order:
  build-file loading (optionally in parallel), dependency qualification and
  resolution, dependent-settings merging, configuration setup, list
  filtering, and target validation.

  Returns [flat_list, targets, data]: the dependency-ordered list of
  qualified target names, the dict mapping those names to target dicts,
  and the raw per-build-file data dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise
  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)
  # Fully qualify all dependency links.
  QualifyDependencies(targets)
  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)
  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)
  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)
  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Filter the dependency sections in isolation so that other sections are
    # left untouched at this phase.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]
  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)
  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)
  [dependency_nodes, flat_list] = BuildDependencyList(targets)
  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)
  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)
  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]
  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])
  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)
  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)
  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)
  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)
  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)
  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)
  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
| mit |
sam-tsai/django-old | django/utils/numberformat.py | 20 | 1396 | from django.conf import settings
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep=''):
    """
    Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:
    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    # sign
    # NOTE(review): float() is used only to detect the sign; for very large
    # Decimal/string inputs this conversion may lose precision or overflow --
    # TODO confirm callers never pass such values.
    if float(number) < 0:
        sign = '-'
    else:
        sign = ''
    # decimal part
    str_number = unicode(number)  # Python 2 text type; normalise input to a string
    if str_number[0] == '-':
        # The sign was captured above; continue with the absolute value.
        str_number = str_number[1:]
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos:
            # Truncate (not round) to the requested number of decimals.
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos:
        # Right-pad with zeros so exactly decimal_pos digits are emitted.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part: dec_part = decimal_sep + dec_part
    # grouping
    if settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR and grouping:
        # Walk the integer digits right-to-left, inserting thousand_sep after
        # every `grouping` digits, then reverse back to normal order.
        int_part_gd = ''
        for cnt, digit in enumerate(int_part[::-1]):
            if cnt and not cnt % grouping:
                int_part_gd += thousand_sep
            int_part_gd += digit
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
| bsd-3-clause |
andres-root/bitcoinxt | qa/rpc-tests/invalidblockrequest.py | 87 | 4077 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance
from test_framework.mininode import *
from test_framework.blocktools import *
import logging
import copy
import time
'''
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
'''
# Use the ComparisonTestFramework with 1 node: only use --testbinary.
class InvalidBlockRequestTest(ComparisonTestFramework):
    ''' Can either run this test as 1 node with expected answers, or two and compare them.
    Change the "outcome" variable from each TestInstance object to only do the comparison. '''
    def __init__(self):
        # Single node is enough; the comparison framework drives it over p2p.
        self.num_nodes = 1
    def run_test(self):
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        # Chain state shared across the generator in get_tests().
        self.tip = None
        self.block_time = None
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def get_tests(self):
        if self.tip is None:
            # Python 2 idiom: append "L" and parse with base 0 to get a long
            # from the hex block-hash string.
            self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
            self.block_time = int(time.time())+1
        '''
        Create a new block with an anyone-can-spend coinbase
        '''
        block = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        yield TestInstance([[block, True]])
        '''
        Now we need that block to mature so we can spend the coinbase.
        '''
        # 100 blocks is the coinbase maturity period.
        test = TestInstance(sync_every_block=False)
        for i in xrange(100):
            block = create_block(self.tip, create_coinbase(), self.block_time)
            block.solve()
            self.tip = block.sha256
            self.block_time += 1
            test.blocks_and_transactions.append([block, True])
        yield test
        '''
        Now we use merkle-root malleability to generate an invalid block with
        same blockheader.
        Manufacture a block with 3 transactions (coinbase, spend of prior
        coinbase, spend of that spend). Duplicate the 3rd transaction to
        leave merkle root and blockheader unchanged but invalidate the block.
        '''
        block2 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        # chr(81) is OP_TRUE
        tx1 = create_transaction(self.block1.vtx[0], 0, chr(81), 50*100000000)
        tx2 = create_transaction(tx1, 0, chr(81), 50*100000000)
        block2.vtx.extend([tx1, tx2])
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.rehash()
        block2.solve()
        orig_hash = block2.sha256
        block2_orig = copy.deepcopy(block2)
        # Mutate block 2: duplicating the last tx keeps the merkle root (and
        # therefore the header hash) identical while making the block invalid.
        block2.vtx.append(tx2)
        assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
        assert_equal(orig_hash, block2.rehash())
        assert(block2_orig.vtx != block2.vtx)
        self.tip = block2.sha256
        # Mutated copy must be rejected; the original must then be accepted.
        yield TestInstance([[block2, False], [block2_orig, True]])
        '''
        Make sure that a totally screwed up block is not valid.
        '''
        block3 = create_block(self.tip, create_coinbase(), self.block_time)
        self.block_time += 1
        block3.vtx[0].vout[0].nValue = 100*100000000 # Too high!
        # Force the cached tx hash to be recomputed after the mutation.
        block3.vtx[0].sha256=None
        block3.vtx[0].calc_sha256()
        block3.hashMerkleRoot = block3.calc_merkle_root()
        block3.rehash()
        block3.solve()
        yield TestInstance([[block3, False]])
# Script entry point: run the comparison test directly.
if __name__ == '__main__':
    InvalidBlockRequestTest().main()
| mit |
eva-oss/linux | tools/perf/scripts/python/syscall-counts.py | 1996 | 1700 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Optional [comm] argument restricts the report to one process name.
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
    sys.exit(usage)
if len(sys.argv) > 1:
    for_comm = sys.argv[1]
# Per-syscall-id counters; autodict comes from the perf Core helpers.
syscalls = autodict()
def trace_begin():
    # Called by perf once before any events are delivered.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf after the last event; emit the summary table.
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, args):
    # Count one syscall entry, optionally filtered to a single comm.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit: autodict returns a nested dict for a missing key, so
        # += raises TypeError; initialise the counter instead.
        syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, args):
    # Older tracepoint signature (no callchain); forward to the raw handler.
    # **locals() passes every parameter through by name.
    raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
    # Emit a two-column table of syscall name vs. count, highest count first.
    # Trailing commas on print suppress the extra newline (Python 2).
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),
    # Sort by (count, id) descending via the tuple-unpacking lambda (py2 only).
    for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
        reverse = True):
        print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
libvirt/libvirt-test-API | selftests/test_nfs.py | 1 | 14069 | import pytest
import shutil
from unittest.mock import Mock, MagicMock, patch
from libvirttestapi.utils import utils, process, nfs
class Cmd_Result():
    """Lightweight stand-in for the result object returned by process.run().

    Only the three attributes the nfs helpers read are modelled:
    exit_status, stderr and stdout.
    """
    def __init__(self, exit_status, stderr, stdout):
        # Mirror the attribute surface of a real command result.
        self.exit_status, self.stderr, self.stdout = exit_status, stderr, stdout
class TestNfs():
    """Unit tests for the libvirttestapi.utils.nfs helpers.

    Every external effect (process.run, service restarts, mount-table
    queries, filesystem checks, remote exec) is replaced with
    unittest.mock patches, so only the control flow inside the nfs
    module is exercised.  Note: @patch decorators inject mocks
    bottom-up, so the innermost decorator maps to the first mock
    parameter.
    """
    def setup_method(self):
        # Fixtures shared by every test method.
        self.nfs_path = '/nfs'
        self.mount_path = '/mnt'
        self.mock_logger = Mock()
        self.mock_logger.info = Mock()
        self.mock_logger.error = Mock()
        self.server_ip = '127.0.0.1'
        self.remote_ip = '192.168.1.9'
        self.username = 'root'
        self.password = 'test'
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.process.run")
    def test_localnfsexport_fail(self, mock_run, mock_local_restart_service):
        # exportfs command fails -> local_nfs_exported reports failure.
        return_str = Cmd_Result(1, 'fail run', "fail" )
        mock_run.return_value = return_str
        result = nfs.local_nfs_exported(self.nfs_path, self.mock_logger)
        assert result == False
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.process.run")
    def test_localnfsexport_pass(self, mock_run, mock_local_restart_service):
        # Successful export command -> True.
        return_str = Cmd_Result(0, 'run', 'success')
        mock_run.return_value = return_str
        # NOTE(review): this rebinds the local name only; it does not
        # configure the mock.  Probably meant
        # mock_local_restart_service.return_value = ... -- TODO confirm.
        mock_local_restart_service = return_str
        result = nfs.local_nfs_exported(self.nfs_path, self.mock_logger)
        assert result == True
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.process.run")
    def test_localnfsexport_fail_restart(self, mock_run, mock_local_restart_service):
        # Export succeeds but the service restart is meant to fail.
        return_str = Cmd_Result(0, 'run', 'success' )
        result_str = Cmd_Result(1, 'fail run', 'fail')
        mock_run.return_value = return_str
        # NOTE(review): same rebinding issue as above -- the restart mock is
        # never actually configured with result_str; verify the intent.
        mock_local_restart_service = result_str
        result = nfs.local_nfs_exported(self.nfs_path, self.mock_logger)
        assert result == 1
    @patch("libvirttestapi.utils.process.run")
    def test_localnfsexportclean_fail(self, mock_run):
        # exportfs -u failure -> False.
        return_str = Cmd_Result(1, 'fail run', 'fail')
        mock_run.return_value = return_str
        result = nfs.local_nfs_exported_clean(self.nfs_path, self.mock_logger)
        assert result == False
    @patch("libvirttestapi.utils.process.run")
    def test_localnfsexportclean_pass(self, mock_run):
        # exportfs -u success -> True.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        result = nfs.local_nfs_exported_clean(self.nfs_path, self.mock_logger)
        assert result == True
    @patch("libvirttestapi.utils.utils.Is_Fedora")
    @patch("libvirttestapi.utils.process.run")
    def test_localrestartservice_pass(self, mock_run, mock_Is_Fedora):
        # Fedora path restarts rpcbind.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_Is_Fedora.return_value = True
        result = nfs.local_restart_service(self.mock_logger)
        mock_run.assert_called_with("systemctl restart rpcbind", shell=True, ignore_status=True)
        assert result == True
    @patch("libvirttestapi.utils.utils.isRelease")
    @patch("libvirttestapi.utils.utils.Is_Fedora")
    @patch("libvirttestapi.utils.process.run")
    def test_localrestartservice_callnfs(self, mock_run, mock_Is_Fedora, mock_isRelease):
        # Non-Fedora, non-release path restarts the "nfs" unit and propagates failure.
        return_str = Cmd_Result(1, 'fail run', 'fail')
        mock_run.return_value = return_str
        mock_Is_Fedora.return_value = False
        mock_isRelease.return_value = False
        result = nfs.local_restart_service(self.mock_logger)
        mock_run.assert_called_with("systemctl restart nfs", shell=True, ignore_status=True)
        assert result == False
    @patch("libvirttestapi.utils.process.system_output")
    def test_local_is_mountd(self, mock_system_output):
        # Mount table contains the pair -> mounted.
        mount_line = "%s %s\n"%(self.nfs_path, self.mount_path)
        mock_system_output.return_value = mount_line
        result = nfs.local_is_mounted(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
    @patch("libvirttestapi.utils.process.system_output")
    def test_local_not_mountd(self, mock_system_output):
        # Unrelated mount entry -> not mounted.
        mount_line = "sysfs /sys\n"
        mock_system_output.return_value = mount_line
        result = nfs.local_is_mounted(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == False
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_umount")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_local_unmount_call(self, mock_local_is_mounted, mock_local_umount, mock_run):
        # Already mounted -> local_mount must unmount first.
        mock_local_is_mounted.return_value = True
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        nfs.local_mount(self.nfs_path, self.mount_path, self.mock_logger)
        assert mock_local_umount.called == True
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_local_mount_fail(self, mock_local_is_mounted, mock_run):
        # mount command fails -> False.
        return_str = Cmd_Result(1, 'fail run', 'fail')
        mock_run.return_value = return_str
        mock_local_is_mounted.return_value = False
        result = nfs.local_mount(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == False
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_local_mount_pass(self, mock_local_is_mounted, mock_run, mock_local_restart_service):
        # mount command succeeds -> True.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_is_mounted.return_value = False
        result = nfs.local_mount(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
    @patch("os.path.isdir")
    @patch("os.path.exists")
    @patch("libvirttestapi.utils.nfs.local_mount")
    @patch("libvirttestapi.utils.nfs.local_nfs_exported")
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_localnfsetup_pass(self, mock_local_is_mounted, mock_local_restart_service,
                               mock_local_nfs_exported, mock_local_mount, mock_exists, mock_isdir):
        # Happy path: everything already in place and every helper succeeds.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_local_is_mounted.return_value = True
        mock_local_restart_service.return_value = True
        mock_exists.return_value = True
        mock_isdir.return_value = True
        mock_local_nfs_exported.return_value = True
        mock_local_mount.return_value = True
        result = nfs.local_nfs_setup(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
    @patch("os.path.isdir")
    @patch("os.path.exists")
    @patch("libvirttestapi.utils.nfs.local_mount")
    @patch("libvirttestapi.utils.nfs.local_nfs_exported")
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_localnfsetup_nfsexported_false(self, mock_local_is_mounted, mock_local_restart_service,
                               mock_local_nfs_exported, mock_local_mount, mock_exists, mock_isdir):
        # Export fails but setup still mounts and reports success.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_local_is_mounted.return_value = True
        mock_local_restart_service.return_value = True
        mock_exists.return_value = True
        mock_isdir.return_value = True
        mock_local_nfs_exported.return_value = False
        mock_local_mount.return_value = True
        result = nfs.local_nfs_setup(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
        assert mock_local_mount.called == True
    @patch("os.path.isdir")
    @patch("os.path.exists")
    @patch("libvirttestapi.utils.nfs.local_mount")
    @patch("libvirttestapi.utils.nfs.local_nfs_exported")
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.nfs.local_is_mounted")
    def test_localnfsetup_localmount_false(self, mock_local_is_mounted, mock_local_restart_service,
                               mock_local_nfs_exported, mock_local_mount, mock_exists, mock_isdir):
        # Mount fails but setup still reports success (documented behaviour under test).
        mock_local_is_mounted.return_value = True
        mock_local_restart_service.return_value = True
        mock_exists.return_value = True
        mock_isdir.return_value = True
        mock_local_nfs_exported.return_value = True
        mock_local_mount.return_value = False
        result = nfs.local_nfs_setup(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
        assert mock_local_mount.called == True
    @patch("os.path.exists")
    @patch("shutil.rmtree")
    @patch("libvirttestapi.utils.nfs.local_nfs_exported_clean")
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.nfs.local_umount")
    def test_localnfsclean_exportedclean_called(self, mock_local_umount, mock_local_restart_service,
                               mock_local_nfs_exported_clean, mock_rmtree, mock_exists):
        # Path exists -> clean must unexport and remove the directory.
        mock_exists.return_value = True
        mock_local_umount.return_value = True
        mock_local_restart_service.return_value = True
        mock_local_nfs_exported_clean.return_value = True
        mock_rmtree.return_value = True
        result = nfs.local_nfs_clean(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
        assert mock_local_nfs_exported_clean.called == True
        assert mock_rmtree.called == True
    @patch("os.path.exists")
    @patch("shutil.rmtree")
    @patch("libvirttestapi.utils.nfs.local_nfs_exported_clean")
    @patch("libvirttestapi.utils.nfs.local_restart_service")
    @patch("libvirttestapi.utils.nfs.local_umount")
    def test_localnfsclean_exportedclean_noncalled(self, mock_local_umount, mock_local_restart_service,
                               mock_local_nfs_exported_clean, mock_rmtree, mock_exists):
        # Path missing -> unexport/rmtree are skipped, restart still happens.
        mock_exists.return_value = False
        mock_local_umount.return_value = True
        mock_local_restart_service.return_value = True
        mock_local_nfs_exported_clean.return_value = True
        mock_rmtree.return_value = True
        result = nfs.local_nfs_clean(self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
        assert mock_local_nfs_exported_clean.called == False
        assert mock_local_restart_service.called == True
        assert mock_rmtree.called == False
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_nfs_setup")
    def test_nfsetup_remoteip_none(self, mock_local_nfs_setup, mock_run):
        # No remote host -> purely local setup.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_nfs_setup.return_value = True
        remote_ip = None
        result = nfs.nfs_setup(self.server_ip, remote_ip, self.username,
                               self.password, self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
    @patch("libvirttestapi.utils.utils.remote_exec_pexpect")
    @patch("libvirttestapi.utils.nfs.remote_mount")
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_nfs_setup")
    def test_nfsetup_pass(self, mock_local_nfs_setup, mock_run,
                          mock_remote_mount, mock_remote_exec_pexpect):
        # Local setup, remote mount and remote exec all succeed.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_nfs_setup.return_value = True
        mock_remote_mount.return_value = True
        mock_remote_exec_pexpect.return_value = (0, 'pass')
        result = nfs.nfs_setup(self.server_ip, self.remote_ip, self.username,
                               self.password, self.nfs_path, self.mount_path, self.mock_logger)
        assert result == True
    @patch("libvirttestapi.utils.utils.remote_exec_pexpect")
    @patch("libvirttestapi.utils.nfs.remote_mount")
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_nfs_setup")
    def test_nfsetup_remotemount_fail(self, mock_local_nfs_setup, mock_run,
                          mock_remote_mount, mock_remote_exec_pexpect):
        # Remote mount failure short-circuits before remote exec.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_nfs_setup.return_value = True
        mock_remote_mount.return_value = False
        mock_remote_exec_pexpect.return_value = (0, 'pass')
        result = nfs.nfs_setup(self.server_ip, self.remote_ip, self.username,
                               self.password, self.nfs_path, self.mount_path, self.mock_logger)
        assert result == False
        assert mock_remote_exec_pexpect.called == False
    @patch("libvirttestapi.utils.utils.remote_exec_pexpect")
    @patch("libvirttestapi.utils.nfs.remote_mount")
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_nfs_setup")
    def test_nfsetup_remoteexec_fail(self, mock_local_nfs_setup, mock_run,
                          mock_remote_mount, mock_remote_exec_pexpect):
        # Remote exec returning a non-zero status -> failure.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_nfs_setup.return_value = True
        mock_remote_mount.return_value = False
        mock_remote_exec_pexpect.return_value = (1, 'Fail')
        result = nfs.nfs_setup(self.server_ip, self.remote_ip, self.username,
                               self.password, self.nfs_path, self.mount_path, self.mock_logger)
        assert result == False
    @patch("libvirttestapi.utils.utils.remote_exec_pexpect")
    @patch("libvirttestapi.utils.nfs.remote_mount")
    @patch("libvirttestapi.utils.process.run")
    @patch("libvirttestapi.utils.nfs.local_nfs_setup")
    def test_nfsetup_localnfssetup_fail(self, mock_local_nfs_setup, mock_run,
                          mock_remote_mount, mock_remote_exec_pexpect):
        # Local setup failure aborts before any remote work.
        return_str = Cmd_Result(0, 'run', 'pass')
        mock_run.return_value = return_str
        mock_local_nfs_setup.return_value = False
        result = nfs.nfs_setup(self.server_ip, self.remote_ip, self.username,
                               self.password, self.nfs_path, self.mount_path, self.mock_logger)
        assert result == False
        assert mock_remote_exec_pexpect.called == False
| gpl-2.0 |
40223101/w17test | static/Brython3.1.0-20150301-090019/Lib/weakref.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
# Convenience tuple for isinstance() checks against either proxy flavour.
ProxyTypes = (ProxyType, CallableProxyType)
# Public API of this module.
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
           "WeakKeyDictionary", "ReferenceType", "ProxyType",
           "CallableProxyType", "ProxyTypes", "WeakValueDictionary",
           "WeakSet"]
# NOTE(review): collections.MutableMapping was moved to collections.abc in
# Python 3.3 and removed from `collections` in 3.10 -- this copy targets an
# older runtime (Brython/3.3-era); confirm before reusing on modern CPython.
class WeakValueDictionary(collections.MutableMapping):
    """Mapping class that references values weakly.
    Entries in the dictionary will be discarded when no strong
    reference to the value exists anymore
    """
    # We inherit the constructor without worrying about the input
    # dictionary; since it uses our .update() method, we get the right
    # checks (if the other dictionary is a WeakValueDictionary,
    # objects are unwrapped on the way out, and we always wrap on the
    # way in).
    def __init__(self, *args, **kw):
        # Callback invoked by the interpreter when a value dies.  It holds
        # only a weakref to `self` so the callback itself does not keep the
        # dictionary alive.
        def remove(wr, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Deleting during iteration would break the iterator;
                    # defer the removal until iteration finishes.
                    self._pending_removals.append(wr.key)
                else:
                    del self.data[wr.key]
        self._remove = remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        self.data = d = {}
        self.update(*args, **kw)
    def _commit_removals(self):
        # Flush the deferred removals queued by the death callback.
        l = self._pending_removals
        d = self.data
        # We shouldn't encounter any KeyError, because this method should
        # always be called *before* mutating the dict.
        while l:
            del d[l.pop()]
    def __getitem__(self, key):
        # Calling the stored weakref yields the value, or None if it died.
        o = self.data[key]()
        if o is None:
            raise KeyError(key)
        else:
            return o
    def __delitem__(self, key):
        if self._pending_removals:
            self._commit_removals()
        del self.data[key]
    def __len__(self):
        # Entries queued for removal are already logically gone.
        return len(self.data) - len(self._pending_removals)
    def __contains__(self, key):
        try:
            o = self.data[key]()
        except KeyError:
            return False
        return o is not None
    def __repr__(self):
        return "<WeakValueDictionary at %s>" % id(self)
    def __setitem__(self, key, value):
        if self._pending_removals:
            self._commit_removals()
        # KeyedRef records the key so the shared `remove` callback knows
        # which entry to delete when the value dies.
        self.data[key] = KeyedRef(value, self._remove, key)
    def copy(self):
        # Shallow copy with still-live values only.
        new = WeakValueDictionary()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                new[key] = o
        return new
    __copy__ = copy
    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, wr in self.data.items():
            o = wr()
            if o is not None:
                # Keys are deep-copied; values stay weakly referenced.
                new[deepcopy(key, memo)] = o
        return new
    def get(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            return default
        else:
            o = wr()
            if o is None:
                # This should only happen
                return default
            else:
                return o
    def items(self):
        # _IterationGuard marks the dict as iterating so removals are deferred.
        with _IterationGuard(self):
            for k, wr in self.data.items():
                v = wr()
                if v is not None:
                    yield k, v
    def keys(self):
        with _IterationGuard(self):
            for k, wr in self.data.items():
                if wr() is not None:
                    yield k
    __iter__ = keys
    def itervaluerefs(self):
        """Return an iterator that yields the weak references to the values.
        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used. This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.
        """
        with _IterationGuard(self):
            for wr in self.data.values():
                yield wr
    def values(self):
        with _IterationGuard(self):
            for wr in self.data.values():
                obj = wr()
                if obj is not None:
                    yield obj
    def popitem(self):
        if self._pending_removals:
            self._commit_removals()
        # Loop past entries whose value has already died.
        while True:
            key, wr = self.data.popitem()
            o = wr()
            if o is not None:
                return key, o
    def pop(self, key, *args):
        if self._pending_removals:
            self._commit_removals()
        try:
            o = self.data.pop(key)()
        except KeyError:
            if args:
                # Optional default supplied positionally, dict.pop-style.
                return args[0]
            raise
        if o is None:
            raise KeyError(key)
        else:
            return o
    def setdefault(self, key, default=None):
        try:
            wr = self.data[key]
        except KeyError:
            if self._pending_removals:
                self._commit_removals()
            self.data[key] = KeyedRef(default, self._remove, key)
            return default
        else:
            return wr()
    def update(self, dict=None, **kwargs):
        if self._pending_removals:
            self._commit_removals()
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of pairs by coercing to a plain dict.
                dict = type({})(dict)
            for key, o in dict.items():
                d[key] = KeyedRef(o, self._remove, key)
        if len(kwargs):
            self.update(kwargs)
    def valuerefs(self):
        """Return a list of weak references to the values.
        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used. This can be used to avoid
        creating references that will cause the garbage collector to
        keep the values around longer than needed.
        """
        return list(self.data.values())
class KeyedRef(ref):
    """Specialized reference that includes a key corresponding to the value.
    This is used in the WeakValueDictionary to avoid having to create
    a function object for each key stored in the mapping. A shared
    callback object can use the 'key' attribute of a KeyedRef instead
    of getting a reference to the key from an enclosing scope.
    """
    # Single slot: no per-instance __dict__, keeping these refs cheap.
    __slots__ = "key",
    def __new__(type, ob, callback, key):
        # The key must be attached in __new__ because ref objects are
        # immutable once constructed.
        self = ref.__new__(type, ob, callback)
        self.key = key
        return self
    def __init__(self, ob, callback, key):
        # `key` was consumed by __new__; only forward the ref arguments.
        super().__init__(ob, callback)
# NOTE(review): same collections.MutableMapping caveat as WeakValueDictionary
# above -- removed from `collections` in Python 3.10.
class WeakKeyDictionary(collections.MutableMapping):
    """ Mapping class that references keys weakly.
    Entries in the dictionary will be discarded when there is no
    longer a strong reference to the key. This can be used to
    associate additional data with an object owned by other parts of
    an application without adding attributes to those objects. This
    can be especially useful with objects that override attribute
    accesses.
    """
    def __init__(self, dict=None):
        self.data = {}
        # Death callback for keys; holds only a weakref to self so the
        # callback does not keep this dictionary alive.
        def remove(k, selfref=ref(self)):
            self = selfref()
            if self is not None:
                if self._iterating:
                    # Defer removal while iteration is in progress.
                    self._pending_removals.append(k)
                else:
                    del self.data[k]
        self._remove = remove
        # A list of dead weakrefs (keys to be removed)
        self._pending_removals = []
        self._iterating = set()
        if dict is not None:
            self.update(dict)
    def _commit_removals(self):
        # NOTE: We don't need to call this method before mutating the dict,
        # because a dead weakref never compares equal to a live weakref,
        # even if they happened to refer to equal objects.
        # However, it means keys may already have been removed.
        l = self._pending_removals
        d = self.data
        while l:
            try:
                del d[l.pop()]
            except KeyError:
                pass
    def __delitem__(self, key):
        # Lookup goes through a fresh weakref; equal live refs hash equally.
        del self.data[ref(key)]
    def __getitem__(self, key):
        return self.data[ref(key)]
    def __len__(self):
        return len(self.data) - len(self._pending_removals)
    def __repr__(self):
        return "<WeakKeyDictionary at %s>" % id(self)
    def __setitem__(self, key, value):
        # Store under a weakref that triggers self._remove when the key dies.
        self.data[ref(key, self._remove)] = value
    def copy(self):
        # Shallow copy containing only entries whose key is still alive.
        new = WeakKeyDictionary()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                new[o] = value
        return new
    __copy__ = copy
    def __deepcopy__(self, memo):
        from copy import deepcopy
        new = self.__class__()
        for key, value in self.data.items():
            o = key()
            if o is not None:
                # Keys stay weakly referenced; values are deep-copied.
                new[o] = deepcopy(value, memo)
        return new
    def get(self, key, default=None):
        return self.data.get(ref(key),default)
    def __contains__(self, key):
        try:
            wr = ref(key)
        except TypeError:
            # Objects without weakref support can never be keys here.
            return False
        return wr in self.data
    def items(self):
        # _IterationGuard defers removals triggered while iterating.
        with _IterationGuard(self):
            for wr, value in self.data.items():
                key = wr()
                if key is not None:
                    yield key, value
    def keys(self):
        with _IterationGuard(self):
            for wr in self.data:
                obj = wr()
                if obj is not None:
                    yield obj
    __iter__ = keys
    def values(self):
        with _IterationGuard(self):
            for wr, value in self.data.items():
                if wr() is not None:
                    yield value
    def keyrefs(self):
        """Return a list of weak references to the keys.
        The references are not guaranteed to be 'live' at the time
        they are used, so the result of calling the references needs
        to be checked before being used. This can be used to avoid
        creating references that will cause the garbage collector to
        keep the keys around longer than needed.
        """
        return list(self.data)
    def popitem(self):
        # Skip entries whose key has already died.
        while True:
            key, value = self.data.popitem()
            o = key()
            if o is not None:
                return o, value
    def pop(self, key, *args):
        return self.data.pop(ref(key), *args)
    def setdefault(self, key, default=None):
        return self.data.setdefault(ref(key, self._remove),default)
    def update(self, dict=None, **kwargs):
        d = self.data
        if dict is not None:
            if not hasattr(dict, "items"):
                # Accept any iterable of pairs by coercing to a plain dict.
                dict = type({})(dict)
            for key, value in dict.items():
                d[ref(key, self._remove)] = value
        if len(kwargs):
            self.update(kwargs)
| gpl-3.0 |
pewsheen/BrowserStack-Automate-Screenshot-Compare | client.py | 1 | 9489 | """ Initiate jobs at Browserstack Screenshots and downloads the images when
they are complete. Optionally renames and slices images for use with
PhantomCSS visual regression testing tool """
import math
import os
import sys
import time
import re
import getopt
import ConfigParser
import subprocess
from PIL import Image
import requests
import browserstack_screenshots
try:
import simplejson as json
except ImportError:
import json
# Module-level state shared across the helpers below.
nextDelay = 0                            # current backoff delay, updated by retry()
phantomcss = False                       # when True, slice images for PhantomCSS
rootDir = ''                             # base directory for config/* property files
totalUsedTime = 0                        # accumulated wait time across retries
config = ConfigParser.ConfigParser()
MAX_RETRIES = 0
OUTPUT_DIR_PHANTOMCSS = ''
output_dir = ''                          # destination for downloaded screenshots
def _build_sliced_filepath(filename, slice_count):
    """Insert *slice_count* between a filename's stem and its extension.

    The numbered path is then run through _build_filepath_for_phantomcss
    so a '.diff' variant is produced when a baseline already exists.
    """
    stem, extension = os.path.splitext(filename)
    numbered = '%s%s%s' % (stem, slice_count, extension)
    return _build_filepath_for_phantomcss(numbered)
def _build_filepath_for_phantomcss(filepath):
    """ Prepare screenshot filename for use with phantomcss.
    ie, append 'diff' to the end of the file if a baseline exists """
    try:
        if os.path.exists(filepath):
            # A baseline image already exists: produce "<root>.diff<ext>" and
            # delete any stale diff from a previous run.
            new_root = '.'.join((os.path.splitext(filepath)[0], 'diff'))
            ext = os.path.splitext(filepath)[1]
            diff_filepath = ''.join((new_root, ext))
            if os.path.exists(diff_filepath):
                print 'removing stale diff: {0}'.format(diff_filepath)
                os.remove(diff_filepath)
            return diff_filepath
        else:
            # No baseline yet: the incoming path becomes the baseline.
            return filepath
    except OSError, e:
        # NOTE(review): on OSError this falls through and implicitly returns
        # None, which callers use as a path -- TODO confirm intended.
        print e
def _build_filename_from_browserstack_json(j):
    """ Build a useful filename for an image from the screenshot json metadata """
    filename = ''
    device = j['device'] if j['device'] else 'Desktop'
    if j['state'] == 'done' and j['image_url']:
        # '.jpg' is joined as a list item, so the result ends in "_.jpg".
        detail = [device, j['os'], j['os_version'],
                  j['browser'], j['browser_version'], '.jpg']
        filename = '_'.join(item.replace(" ", "_") for item in detail if item)
    else:
        # Incomplete job entry; caller treats '' as "skip this screenshot".
        print 'screenshot timed out, ignoring this result'
    return filename
def _long_image_slice(in_filepath, out_filepath, slice_size):
    """ Slice an image into parts slice_size tall. """
    print 'slicing image: {0}'.format(in_filepath)
    img = Image.open(in_filepath)
    width, height = img.size
    upper = 0
    left = 0
    # NOTE(review): under Python 2, height / slice_size is integer (floor)
    # division, so ceil() is a no-op here; an image shorter than slice_size
    # yields slices == 0 and NO output files -- TODO confirm and fix.
    slices = int(math.ceil(height / slice_size))
    count = 1
    for slice in range(slices):
        # if we are at the end, set the lower bound to be the bottom of the image
        if count == slices:
            lower = height
        else:
            lower = int(count * slice_size)
        # set the bounding box! The important bit
        bbox = (left, upper, width, lower)
        working_slice = img.crop(bbox)
        upper += slice_size
        # save the slice
        new_filepath = _build_sliced_filepath(out_filepath, count)
        working_slice.save(new_filepath)
        count += 1
def _read_json(path):
    """Load the Browserstack config JSON at `path`, expanding the
    <macProperties>/<pcProperties>/<androidProperties>/<iosProperties>
    placeholders with the matching files under rootDir/config/.

    Returns {} when any file cannot be read.
    NOTE(review): paths are built with Windows-style backslashes, so this
    only works on Windows as written -- confirm the target platform.
    """
    global rootDir
    try:
        with open(path) as f:
            browserStackConfig = f.read()
        macPropertiesFile = open(rootDir+"config\\mac.properties", "r")
        macProperties = macPropertiesFile.read()
        browserStackConfig = browserStackConfig.replace('<macProperties>', macProperties)
        macPropertiesFile.close()
        pcPropertiesFile = open(rootDir+"config\\pc.properties", "r")
        pcProperties = pcPropertiesFile.read()
        browserStackConfig = browserStackConfig.replace('<pcProperties>', pcProperties)
        pcPropertiesFile.close()
        androidPropertiesFile = open(rootDir+"config\\android.properties", "r")
        androidProperties = androidPropertiesFile.read()
        browserStackConfig = browserStackConfig.replace('<androidProperties>', androidProperties)
        androidPropertiesFile.close()
        iosPropertiesFile = open(rootDir+"config\\ios.properties", "r")
        iosProperties = iosPropertiesFile.read()
        browserStackConfig = browserStackConfig.replace('<iosProperties>', iosProperties)
        iosPropertiesFile.close()
        # f was opened via `with` above; this close is redundant but harmless.
        f.close()
        return json.loads(browserStackConfig)
    except (EOFError, IOError), e:
        print e
        return {}
def _mkdir(path):
    """Create `path` (and parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError, e:
        # errno 17 == EEXIST: the directory already being there is fine.
        if e.errno != 17:
            raise
def _download_file(uri, filename):
    """Stream `uri` to `filename` in 1 KiB chunks."""
    try:
        with open(filename, 'wb') as handle:
            request = requests.get(uri, stream=True)
            for block in request.iter_content(1024):
                if not block:
                    break
                handle.write(block)
    except IOError, e:
        # Best effort: log and continue; caller checks the file's existence.
        print e
def _purge(dir, pattern, reason=''):
    """ delete files in dir that match pattern """
    for f in os.listdir(dir):
        # re.search: the pattern may match anywhere in the filename.
        if re.search(pattern, f):
            print "Purging file {0}. {1}".format(f, reason)
            os.remove(os.path.join(dir, f))
def retry(tries, delay=1, backoff=1.1):
    """Retries a function or method until it returns True.

    Decorator factory: the wrapped callable is invoked once up front and
    then up to `tries` more times, sleeping `delay` seconds between
    attempts and multiplying the delay by `backoff` each time.  The
    current delay is published via the module global `nextDelay`.
    """
    if backoff < 1:
        raise ValueError("backoff must be greater than 1")
    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be 0 or greater")
    if delay <= 0:
        raise ValueError("delay must be greater than 0")
    def deco_retry(f):
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            rv = f(*args, **kwargs) # first attempt
            while mtries > 0:
                if rv is True:
                    return True
                mtries -= 1
                time.sleep(mdelay)
                mdelay *= backoff
                global nextDelay
                nextDelay = mdelay
                rv = f(*args, **kwargs) # Try again
            # NOTE(review): the final attempt's result is never checked --
            # if the last call returns True we still fall through to False.
            # Off-by-one worth confirming with the author.
            print str(tries) + " attempts. Abandoning."
            return False # Ran out of tries
        return f_retry
    return deco_retry
def get_screenshots(s, job_id, result_dir):
    """Poll BrowserStack job *job_id* via client *s*; if it is complete,
    download every screenshot into <output_dir>/<result_dir> and return True.

    Returns False while the job is still pending (accumulating the upcoming
    wait time into the ``totalUsedTime`` global) or when saving fails.  When
    the ``phantomcss`` global is set, each image is sliced into 300px strips
    and the unsliced original is deleted.
    """
    screenshots_json = s.get_screenshots(job_id)
    if screenshots_json:
        # add new parameter to create screenshots in directory equal to filename config
        new_directory = os.path.join(output_dir, result_dir)
        _mkdir(new_directory)
        try:
            print ('Screenshot job complete. Saving files in %s'% new_directory)
            # Remove diff leftovers from a previous comparison run.
            _purge(new_directory, '.diff', 'stale diff')
            for i in screenshots_json['screenshots']:
                filename = _build_filename_from_browserstack_json(i)
                base_image = os.path.join(new_directory, filename)
                if filename:
                    _download_file(i['image_url'], base_image)
                if phantomcss and os.path.isfile(base_image):
                    # slice the image. slicing on css selector could be better..
                    _long_image_slice(base_image, base_image, 300)
                    os.remove(base_image)
            print 'Done saving.'
            return True
        except OSError, e:
            print e
            return False
    else:
        # Job not finished yet: nextDelay was set by the retry decorator and
        # tells us how long the next wait will be; tally it for the report.
        global totalUsedTime
        totalUsedTime += nextDelay
        print "Screenshots job incomplete. Waiting before retry... (waiting for " + format(nextDelay) + " seconds)"
        return False
class ScreenshotIncompleteError(Exception):
    """Marker exception for a screenshot job that has not finished.

    NOTE(review): defined but not raised anywhere in this file's visible
    code; get_screenshots() signals incompleteness by returning False.
    """
    pass
def main(argv):
    """Command-line entry point: queue a BrowserStack screenshot job from a
    JSON config, poll until complete, download the images, then run the
    node-based LimaCompare diff over the results.

    Flags: -a/--auth user:pass, -c/--config file, -p/--phantomcss,
    -o/--output dir, -t/--target env, -r/--root dir.
    """
    def usage():
        print 'Usage:\n-a, --auth <username:password>\n-c, --config <config_file>\n-p, --phantomcss'
    try:
        # NOTE(review): the short-option spec declares "p:" (value expected)
        # and "r" (no value), but the handlers below treat -p as a flag and
        # read an argument for -r -- spec likely meant to be "a:c:po:t:r:".
        opts, args = getopt.getopt(argv, "a:c:p:o:t:r", ["auth=", "config=", "phantomcss", "output=", "target=", "root="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    auth = None
    config_file = ''
    for opt, arg in opts:
        if opt in ("-a", "--auth"):
            auth = tuple(arg.split(':'))
        if opt in ("-c", "--config"):
            config_file = arg
        if opt in ("-t", "--target"):
            # NOTE(review): target_environment is only bound when -t is
            # given, but it is read unconditionally in the compare step
            # below -- omitting -t raises NameError there.
            target_environment = arg
        if opt in ("-p", "--phantomcss"):
            global phantomcss, output_dir
            phantomcss = True
            output_dir = OUTPUT_DIR_PHANTOMCSS
        if opt in ("-o", "--output"):
            output_dir = arg
        if opt in ("-r", "--root"):
            global rootDir
            rootDir = os.path.join("", arg)
    global config, MAX_RETRIES
    config.read(rootDir+'main_config.properties')
    MAX_RETRIES = int(config.get('BROWSERSTACK', 'max_retries'))
    if auth is None:
        api_user = config.get('BROWSERSTACK', 'bs_username')
        api_token = config.get('BROWSERSTACK', 'bs_api_key')
        auth = (api_user, api_token)
    # NOTE(review): 'config' is rebound here from the ConfigParser above to
    # the parsed JSON dict (or None) -- two meanings for one global.
    config = _read_json(config_file) if config_file else None
    print 'using config {0}'.format(config_file)
    print config
    # get config filename, after removing .json - create new result directory for this config
    path, filename=os.path.split(config_file)
    result_dir=filename.split(".")[0]
    # Start get screenshot process
    s = browserstack_screenshots.Screenshots(auth=auth, config=config)
    generate_resp_json = s.generate_screenshots()
    job_id = generate_resp_json['job_id']
    # Poll every 30s (constant: backoff == 1) up to MAX_RETRIES times.
    @retry(MAX_RETRIES, 30, 1)
    def retry_get_screenshots(s, job_id, result_dir):
        return get_screenshots(s, job_id, result_dir)
    print "BrowserStack url http://www.browserstack.com/screenshots/{0}".format(job_id)
    if not retry_get_screenshots(s, job_id, result_dir):
        print """ Failed. The job was not complete at Browserstack after x
        attempts. You may need to increase the number of retry attempts """
    else :
        global totalUsedTime
        print 'Total time: {0}'.format(totalUsedTime)
        # '''
        # Capture job success, start compare here
        # '''
        screenshot_dir = os.path.abspath(os.path.join(output_dir, os.pardir))
        working_dir = target_environment + '\\' + result_dir
        diff_dir = os.path.join(screenshot_dir, 'diff')
        # '''
        # subprocess.Popen([...])
        # 0: node
        # 1: compare.js
        # 2: Screenshot folder
        # Example - D:\screenshot\2015-07-22_0132_PM\
        # 3: Diff output folder
        # Example - D:\screenshot\2015-07-22_0132_PM\diff\
        # 4: Working dir
        # Example - asia\japan
        # 5: Screenshot execute root directory
        # Example - C:\Users\Administrator\Desktop\bsautomate\
        # '''
        compareP = subprocess.Popen(["node", rootDir+"LimaCompare\\compare.js", screenshot_dir, diff_dir, working_dir, rootDir], stdout=subprocess.PIPE , stderr=subprocess.PIPE)
        stdout, stderr = compareP.communicate()
        print '\n----------------- Compare mesg ------------------------\n'
        print stdout
        print '\n----------------- Compare mesg ERR --------------------\n'
        print stderr
if __name__ == "__main__":
main(sys.argv[1:]) | mit |
ksun6868/xbmc-addons-chinese | plugin.video.yinyuetai/default.py | 2 | 50428 | # -*- coding: utf-8 -*-
import urllib,urllib2,re,os,xbmcplugin,xbmcgui,xbmc
import xbmcaddon
import datetime
import gzip, StringIO
import cookielib
import base64
try:
import simplejson
except ImportError:
import json as simplejson
##########################################################################
# 音悦台MV
##########################################################################
# Version 1.7.5 2014-06-15 (cmeng)
# Replace embedded unicode with ascii in get_vurl
##########################################################################
# Addon identity and per-addon storage paths resolved through xbmcaddon.
__addonname__ = "音悦台MV"
__addonid__ = "plugin.video.yinyuetai"
__addon__ = xbmcaddon.Addon(id=__addonid__)
__addonicon__ = os.path.join( __addon__.getAddonInfo('path'), 'icon.png' )
__settings__ = xbmcaddon.Addon(id=__addonid__)
__icon__ = xbmc.translatePath( __settings__.getAddonInfo('icon') )
__profile__ = xbmc.translatePath( __settings__.getAddonInfo('profile') )
# Persistent cookie jar kept inside the addon profile directory.
cookieFile = __profile__ + 'cookies.yinyuetai'
UserAgent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
# [id, label] lookup tables consumed by fetchID(): element 0 is the site/url
# token, element 1 the Chinese UI label shown in selection dialogs.
#FCS_LIST = [['','首播'],['index-ml','内地'],['index-ht','港台'],['index-us','欧美'],['index-kr','韩语'],['index-jp','日语'],['index-yyman','音悦人'],['index-elite','热门推荐']]
FCS_LIST = [['all','全部'],['ml','内地'],['ht','港台'],['us','欧美'],['kr','韩国'],['jp','日本']]
MVR_LIST = [['all','全部推荐'],['ML','内地推荐'],['HT','港台推荐'],['US','欧美推荐'],['KR','韩语推荐'],['JP','日语推荐']]
MVR_DATE = [['today','今日'],['week','本周'],['month','本月']]
MVF_LIST = [['newRecommend','最新推荐'],['newFavorite','最新收藏'],['newComment','最新评论'],['hotView','热门播放'],['hotRecommend','热门推荐'],['hotFavorite','热门收藏'],['hotComment','热门评论'],['promo','编辑推荐'],['all','全部悦单']]
MVO_LIST = [['all','全部热门'],['today','24小时热门'],['week','本周热门'],['month','本月热门']]
AREA_LIST = [['','全部地区'],['ML','内地'],['HT','港台'],['US','欧美'],['KR','韩国'],['JP','日本']]
PAGE_LIST = [['1','TOP:1-20'],['2','TOP:21-40'],['3','TOP:41-50']]
VCHART_LIST = [['ML','内地篇'],['HT','港台篇'],['US','欧美篇'],['KR','韩国篇'],['JP','日本篇']]
GS_LIST = [['','全部歌手'],['Girl','女歌手'],['Boy','男歌手'],['Combo','乐队/组合']]
##################################################################################
# Routine to fetch url site data using Mozilla browser
# - delete '\r|\n|\t' for easy re.compile
# - do not delete \s <space> as some url include spaces
# - unicode with 'replace' option to avoid exception on some url
# - translate to utf8
##################################################################################
def getHttpData(url):
    """Fetch *url* with proxy and persistent-cookie support; return the body
    as utf-8 text with CR/LF/TAB characters stripped (the regex parsers in
    this addon assume single-line HTML).

    On HTTPError the error body is returned; on URLError the literal string
    "IO Timeout Error" is returned instead of page data.
    """
    print "getHttpData: " + url
    # setup proxy support
    proxy = __addon__.getSetting('http_proxy')
    type = 'http'
    if proxy <> '':
        ptype = re.split(':', proxy)
        if len(ptype)<3:
            # full path requires by Python 2.4
            proxy = type + '://' + proxy
        else: type = ptype[0]
        httpProxy = {type: proxy}
    else:
        httpProxy = {}
    proxy_support = urllib2.ProxyHandler(httpProxy)
    # setup cookie support (jar persisted across runs in cookieFile)
    cj = cookielib.MozillaCookieJar(cookieFile)
    if os.path.isfile(cookieFile):
        cj.load(ignore_discard=False, ignore_expires=False)
    else:
        if not os.path.isdir(os.path.dirname(cookieFile)):
            os.makedirs(os.path.dirname(cookieFile))
    # create opener for both proxy and cookie
    opener = urllib2.build_opener(proxy_support, urllib2.HTTPCookieProcessor(cj))
    charset=''
    req = urllib2.Request(url)
    req.add_header('User-Agent', UserAgent)
    try:
        response = opener.open(req)
        #response = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        httpdata = e.read()
    except urllib2.URLError, e:
        httpdata = "IO Timeout Error"
    else:
        httpdata = response.read()
        # Transparently decompress gzip-encoded responses.
        if response.headers.get('content-encoding', None) == 'gzip':
            httpdata = gzip.GzipFile(fileobj=StringIO.StringIO(httpdata)).read()
        charset = response.headers.getparam('charset')
        cj.save(cookieFile, ignore_discard=True, ignore_expires=True)
        response.close()
    httpdata = re.sub('\r|\n|\t', '', httpdata)
    # A charset declared in the page's <meta> tag overrides the HTTP header.
    match = re.compile('<meta.+?charset=["]*(.+?)"').findall(httpdata)
    if len(match):
        charset = match[0]
    if charset:
        charset = charset.lower()
        if (charset != 'utf-8') and (charset != 'utf8'):
            httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')
    return httpdata
##################################################################################
def get_flv_url(url):
    """Resolve a YinYueTai video-page URL to a direct download URL via the
    flvcd.com parser, honouring the addon's video-resolution setting.
    Tries up to 5 times and returns None (implicitly) when nothing matches.
    """
    # http://www.flvcd.com/parse.php?flag=&format=&kw=http://3A%2F%2Fwww.yinyuetai.com%2Fvideo%2F389970&sbt=%BF%AA%CA%BCGO%21
    videoRes = int(__addon__.getSetting('video_resolution'))
    vparamap = {0:'normal', 1:'high', 2:'super'}
    p_url = "http://www.flvcd.com/parse.php?kw="+url+"&format="+vparamap.get(videoRes,0)
    for i in range(5): # Retry specified trials before giving up (seen 9 trials max)
        try: # stop xbmc from throwing error to prematurely terminate video queuing
            link = getHttpData(p_url)
            match=re.compile('下载地址:\s*<a href="(.+?)" target="_blank" class="link"').findall(link)
            if len(match): return match[0]
        except:
            pass
# Alternative resolver kept because flvcd.com was facing slow responses.
def get_flv_urlx(url):
    """Resolve a YinYueTai URL to a stream URL via flvxz.com (fallback for
    get_flv_url).  Same retry/return contract: 5 attempts, None on failure.
    """
    videoRes = int(__addon__.getSetting('video_resolution'))
    vparamap = {0:'[流畅]', 1:'[高清]', 2:'[超清]'}
    encodedUri = base64.b64encode(url)
    p_url = "http://www.flvxz.com/getFlv.php?url="+encodedUri
    for i in range(5): # Retry specified trials before giving up (seen 9 trials max)
        try: # stop xbmc from throwing error to prematurely terminate video queuing
            link = getHttpData(p_url)
            match=re.compile('<span style="color:red">'+vparamap.get(videoRes,0)+'</span>.+?<a target="_blank" href="(.+?)">Preview this part</a>').findall(link)
            if len(match):
                return match[0]
        except:
            pass
##################################################################################
# Get imgae from local storage if available
# Fetch from Web if none found - currently disabled
##################################################################################
def get_Thumb(icon):
    """Return a local cache path for thumbnail URL *icon*, downloading it on
    first use; fall back to the addon icon for blank/trivial URLs.
    """
    if len(icon) < 2:
        return __icon__
    url = icon.split('?')[0]
    # Strip scheme + host: 'http://' (7) plus the host plus '/' = host+8.
    len_http = len(url.split('/')[2]) + 8
    pic = __profile__ + url[len_http:]
    if not os.path.isfile(pic):
        if not os.path.isdir(os.path.dirname(pic)):
            os.makedirs(os.path.dirname(pic))
        try:
            # urlretrieve returns (local_filename, headers); index 0 is the
            # saved path.  The original indexed with the undefined name
            # `filename`, which always raised NameError and was silently
            # swallowed by the bare except, so the file was never cached.
            pic = urllib.urlretrieve(url, pic)[0]
        except:
            pass
    return pic
##################################################################################
# Routine to extract url ID from array based on given selected filter
##################################################################################
def fetchID(dlist, idx):
    """Return element 0 of the first entry in *dlist* whose element 1 equals
    *idx*; return '' when no entry matches.

    Entries may be pairs or longer sequences ([id, label, ...]); only the
    first two positions are consulted.
    """
    for entry in dlist:
        if entry[1] == idx:
            return entry[0]
    return ''
##################################################################################
# Routine to fetch and build video filter list
# tuple to list conversion and strip spaces
# - 按类型 (Categories)
# - 按地区 (Countries/Areas)
# - 按年份 (Year)
# - etc
##################################################################################
def getListMV(listpage):
    """Parse the MV filter <ul> sections out of *listpage* HTML.

    Returns (arealist, artistlist, versionlist, taglist, genrelist); each is
    a list of (id, label) tuples with an ['', <all-label>] entry prepended
    when the section is non-empty.

    Refactor note: the original repeated the same extraction five times with
    two slightly different anchor regexes; one compiled regex (the variant
    that skips leading whitespace before the label) is now shared, which
    also normalises away stray leading whitespace in captured labels.
    """
    item_re = re.compile('<a href=".+?name="(.+?)".+?>[\s]*(.+?)</a>', re.DOTALL)

    def _section(section_name, all_label):
        # Grab the <ul name="..."> block, extract its (id, label) anchors,
        # and prepend the catch-all entry if anything was found.
        block = re.compile('<ul name="%s">(.+?)</ul>' % section_name).findall(listpage)
        items = item_re.findall(block[0])
        if len(items) > 0:
            items.insert(0, ['', all_label])
        return items

    arealist = _section('area', '全部地区')
    artistlist = _section('artist', '全部类别')
    versionlist = _section('version', '全部视频')
    taglist = _section('tag', '全部标签')
    genrelist = _section('genre', '全部流派')
    return arealist, artistlist, versionlist, taglist, genrelist
##################################################################################
# Routine to fetch and build VChart filter list
# http://www.yinyuetai.com/vchart/video-rank-week-date?area=ML&year=2012
# {"year":0,"dateCode":20111219,"periods":"07","beginDateText":"12.19","endDateText":"12.25"}
# <a href="javascript:void(0)" val="20120709">29期(07.09-07.15)</a>
##################################################################################
def getTimeList(area):
    """Fetch the weekly V-chart period index for *area*.

    Covers the current and previous calendar year only.  Returns a list of
    [dateCode, '<period>期(<begin>-<end>)', year-string] entries.
    """
    current_year = datetime.datetime.now().year
    timelist = []
    for yr in (current_year, current_year - 1):
        index_url = 'http://vchart.yinyuetai.com/vchart/video-rank-week-date?area='+area+'&year='+str(yr)
        page = getHttpData(index_url)
        rows = re.compile('{"year":.+?,"dateCode":(.+?),"periods":"(.+?)","beginDateText":"(.+?)","endDateText":"(.+?)"}').findall(page)
        for datecode, period, begindate, enddate in rows:
            label = period+'期('+begindate+'-'+enddate+')'
            timelist.append([datecode, label, str(yr)])
    return timelist
##################################################################################
def addDir(name,url,mode,pic,isDir=True,sn=''):
    """Append one entry to the current XBMC directory listing.

    *sn*, when given, is rendered as a "N. " prefix on the label; the entry
    re-invokes this plugin with url/mode/name packed into the query string.
    """
    prefix = '' if sn == '' else str(sn) + ". "
    item_url = (sys.argv[0] + "?url=" + urllib.quote_plus(url) +
                "&mode=" + str(mode) + "&name=" + urllib.quote_plus(name))
    li = xbmcgui.ListItem(prefix + name, '', pic, pic)
    li.setInfo(type="Video", infoLabels={"Title": name})
    return xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=item_url,
                                       listitem=li, isFolder=isDir)
##################################################################################
# Yinyuetai Main Menu
##################################################################################
def MainMenu(ctl):
    """Render the top-level addon menu from the *ctl* table.

    The indexing below implies ctl[mode] is a sequence with the display
    name at [1], a site-relative URL at [2] and an is-directory flag at
    [3], while ctl[None][2] lists the mode keys in menu order.
    NOTE(review): structure inferred from usage here -- confirm against the
    (unseen) caller that builds ctl.
    """
    vlist = [x for x in ctl[None][2]]
    j=0
    for mode in vlist:
        j+=1
        name = ctl[mode][1]
        url = 'http://www.yinyuetai.com'+ctl[mode][2]
        isDir = ctl[mode][3]
        pic = __addonicon__
        addDir(name,url,mode,pic,isDir,j)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# http://www.yinyuetai.com/vchart/include/trends-list?page=1&area=ML&trendUrl=/vchart/trends?page=1&area=ML
# http://www.yinyuetai.com/vchart/ajax/trends-list?page=2&area=ML&currentUrl=%2Fvchart%2Ftrends%3Farea%3DML%23!page%3D2
# http://www.yinyuetai.com/vchart/ajax/trends-list?page=2&area=ML&currentUrl=/vchart/trends?area=ML#!page=2
##################################################################################
def listVChart(name,area,date,timelist):
    """Build the XBMC listing for one week of the V-chart.

    Shows a header row (re-enters mode 11 for filter selection), then one
    playable row per chart entry, each also queued on playlist 0.
    *area*/*date* default to '内地篇' and the newest period.
    """
    # fetch user specified parameters
    if area is None: area = '内地篇'
    fltrArea = fetchID(VCHART_LIST, area)
    if timelist is None:
        timelist = getTimeList(fltrArea)
    if date is None: date = timelist[0][1]
    fltrDate = fetchID(timelist, date)
    year = fltrDate[:4]
    # Fetch & build video titles list for user selection, highlight user selected filter
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR]【[COLOR FFFF0000]'+area+'[/COLOR]/[COLOR FF00FF00]'+year+'[/COLOR]/[COLOR FF5555FF]'+date+'[/COLOR]】(按此选择)')
    u = sys.argv[0] + "?mode=11&name="+urllib.quote_plus(name)+"&area="+area+"&date="+date
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    # Fix: the query parameter was mangled by HTML-entity decoding
    # ("&curren" collapsed to "¤"); restore the intended &currentUrl=...
    url = 'http://vchart.yinyuetai.com/vchart/ajax/vchart?date='+fltrDate+'&area='+fltrArea+'&currentUrl=/vchart/v/area='+fltrArea+'%26date='+fltrDate
    link = getHttpData(url)
    if link == None: return
    matchli = re.compile('<li name="vlist_li".+?>(.+?)</ul></div></div></li>').findall(link)
    if len(matchli):
        totalItems=len(matchli)
        playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
        playlist.clear()
        j=0
        for item in matchli:
            matchp=re.compile('<a href="(.+?)" target="_blank"><img src="(.+?)" alt="(.+?)"/>').findall(item)
            p_url = matchp[0][0]
            p_thumb = matchp[0][1]
            p_thumb += '|Referer=http://www.yinyuetai.com'
            p_name = matchp[0][2]
            artist=re.compile('<a href=".+?/fanclub.+?target="_blank">(.+?)</a>').findall(item)
            p_artist = artist[0]
            matchp=re.compile('<div class="number" name="data_info">(.+?)</div>').findall(item)
            p_score = matchp[0].strip()
            matchp=re.compile('<li>发布时间:(.+?)</li>').findall(item)
            p_date = matchp[0]
            j+=1
            p_list = str(j)+'. '+p_name+' [COLOR FFFF55FF]['+p_artist+'][/COLOR][COLOR FFFFFF55] ('+p_score+') [/COLOR]['+p_date+']'
            li = xbmcgui.ListItem(p_list, iconImage = '', thumbnailImage = p_thumb)
            li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
            u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
            playlist.add(p_url, li)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangeVChart(name,area,date,timelist):
    """Dialog flow for the V-chart: pick area, then year (only offered when
    more than two are available), then week period; rebuild the listing via
    listVChart() if any selection changed.
    """
    change = False
    dialog = xbmcgui.Dialog()
    list = [x[1] for x in VCHART_LIST]
    sel = dialog.select('悦单', list)
    if sel != -1:
        area = VCHART_LIST[sel][1]
        change = True
    list=[]
    # NOTE(review): executed even when the dialog was cancelled (sel == -1),
    # in which case VCHART_LIST[-1] silently selects the last area (日本篇).
    timelist = getTimeList(VCHART_LIST[sel][0])
    year = fetchID(timelist, date)[:4]
    # Collect the distinct years present in the period index.
    for x in timelist:
        if x[2] not in list: list.append(x[2])
    if len(list) > 2:
        sel = dialog.select('年份', list)
        if sel != -1:
            year = list[sel]
            change = True
    else:
        year = list[0]
    list=[]
    # Offer only the periods belonging to the chosen year.
    list = [x[1] for x in timelist if x[2] == year]
    sel = dialog.select(year+' 期份', list)
    if sel != -1:
        date = list[sel]
        change = True
    if change: listVChart(name,area,date,timelist)
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangeVChartx(name,area,page):
    """Legacy variant of performChangeVChart using PAGE_LIST rank buckets.

    NOTE(review): apparently stale/unused -- the final call passes three
    arguments to listVChart(), which takes four (name, area, date,
    timelist), so reaching it would raise TypeError.
    """
    change = False
    dialog = xbmcgui.Dialog()
    list = [x[1] for x in VCHART_LIST]
    sel = dialog.select('悦单', list)
    if sel != -1:
        area = VCHART_LIST[sel][1]
        change = True
    list = [x[1] for x in PAGE_LIST]
    sel = dialog.select('排名', list)
    if sel != -1:
        page = PAGE_LIST[sel][1]
        change = True
    if change: listVChart(name,area,page)
##################################################################################
# http://www.yinyuetai.com/index-ml
##################################################################################
def listFocusMV(name,p_url,cat):
    """Build the XBMC listing for an ajax 'focus' feed.

    *p_url* is the ajax endpoint prefix; the FCS_LIST id for *cat* (default
    '全部') is appended to it.  The response is a JSON array of videos; each
    becomes a playable row and is queued on playlist 0.
    """
    # fetch user specified parameters
    if cat == None: cat = '全部'
    fltrCat = fetchID(FCS_LIST, cat)
    # url = 'http://www.yinyuetai.com/ajax/zhengliuxing?area=' + fltrCat
    # url = 'http://www.yinyuetai.com/ajax/shoubo?area=' + fltrCat
    url = p_url + fltrCat
    # Fetch & build video titles list for user selection, highlight user selected filter
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR]【[COLOR FF00FF00]'+cat+'[/COLOR]】(按此选择)')
    u = sys.argv[0] + "?mode=12&name="+urllib.quote_plus(name)+"&url="+urllib.quote_plus(p_url)+"&cat="+cat
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(url)
    if link == None: return
    playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
    playlist.clear()
    # fetch and build the video series episode list
    vlist = simplejson.loads(link)
    totalItems = len(vlist)
    for i in range(0, totalItems):
        vid = str(vlist[i]['videoId'])
        #v_url = 'http://www.yinyuetai.com/mv/get-video-info?videoId=' + vid
        v_url = 'http://www.yinyuetai.com/video/' + vid
        p_thumb = vlist[i]['image']
        p_title = vlist[i]['title'].encode('utf-8')
        p_artists = vlist[i]['artists']
        p_artist =''
        # Join all artist names; the trailing ', ' is trimmed below.
        for j in range(0, len(p_artists)):
            p_artist += p_artists[j]['artistName'].encode('utf-8') + ', '
        p_list = p_name = str(i+1) + '. ' + p_title
        p_list += ' [COLOR FF00FFFF][' + p_artist[:-2] + '][/COLOR]'
        li = xbmcgui.ListItem(p_list, iconImage='', thumbnailImage=p_thumb)
        li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
        u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(v_url)+"&thumb="+urllib.quote_plus(p_thumb)
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
        playlist.add(v_url, li)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangeFocus(name,url,cat):
    """Prompt for a focus-feed area (FCS_LIST); relist only when the user
    actually picks one (cancel leaves the listing untouched)."""
    labels = [entry[1] for entry in FCS_LIST]
    sel = xbmcgui.Dialog().select(name, labels)
    if sel != -1:
        listFocusMV(name, url, FCS_LIST[sel][1])
#############################################################################################
# Routine to fetch and build the video selection menu
# - selected page & filters (user selectable)
# - video items list
# - user selectable pages
# #### url parameter decode #####
# http://www.yinyuetai.com/mv/all?area=ML&artist=Girl&version=concert&tag=%E7%BB%8F%E5%85%B8&genre=1&enName=A&page=1
# area: 艺人地区 = 全部 内地 港台 欧美 韩语 日语 其他
# artist: 艺人类别 = 全部 女艺人 男艺人 乐队组合 其他
# version: 视频分类 = 全部 官方版 演唱会 现场版 饭团视频 字幕版 其他
# tag: 视频标签 = 全部 超清 首播 热门 经典 励志 搞笑 影视 创意 爱情 伤感 热舞 翻唱 演唱会
# genre:艺人流派 = 全部 流行 民谣 蓝调 古典 乡村 舞曲 电子 嘻哈/说唱 独立 爵士 拉丁 金属 R&B 摇滚 电影原声
# 世界音乐 环境音乐 另类 放克 硬核 朋克 轻音乐 搞笑 儿童音乐 雷鬼 中国风 灵魂 后摇 民族风
# enName: A-Z
# page: page number
#############################################################################################
def listAllMV(name,url,area,artist,version,tag, genre,fname,order,page,listpage):
    """Build the XBMC listing for the 'all MVs' browser.

    Filters (area/artist/version/tag/genre/first-letter) are Chinese labels
    translated to site ids via fetchID(); *listpage* caches the filter HTML
    so later pages do not re-fetch it.  Shows a header row (mode 13 filter
    dialog), one playable row per MV (also queued on playlist 0), and page
    links parsed from the site's pager.
    """
    if listpage is None:
        link=getHttpData(url)
        if link == None: return
        match = re.compile('<div class="allCategory" id="allCategory">(.+?)<div id="mvlist" class="mv_list_vertical">').findall(link)
        listpage = match[0]
    arealist,artistlist,versionlist,taglist,genrelist = getListMV(listpage)
    # fetch user specified parameters
    if area == None:
        area = '全部地区'
    fltrArea = fetchID(arealist, area)
    if artist == None:
        artist = '全部类别'
    fltrArtist = fetchID(artistlist, artist)
    if version == None:
        version = '全部视频'
    fltrVersion = fetchID(versionlist,version)
    if tag == None:
        tag = '全部标签'
    fltrTag = fetchID(taglist, tag)
    if genre == None:
        genre = '全部流派'
    fltrGenre = fetchID(genrelist,genre )
    if fname == None: fname = '全部'
    if page is None: page = 1
    # Fetch & build video titles list for user selection, highlight user selected filter
    url = 'http://mv.yinyuetai.com/all?&sort=pubdate&area='+fltrArea+'&artist='+fltrArtist+'&version='+fltrVersion+'&tag='+urllib.quote(fltrTag)+'&genre='+fltrGenre
    if fname <> '全部':
        url += '&enName='+fname
    url += '&page='+str(page)
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR](第'+str(page)+'页)【[COLOR FFFF0000]'+area+'[/COLOR]/[COLOR FF00FF00]'+artist+'[/COLOR]/[COLOR FF5555FF]'+version+'[/COLOR]/[COLOR FFFFFF00]'+tag+'[/COLOR]/[COLOR FFFF55FF]'+genre+'[/COLOR]/[COLOR FFFF5555]姓:'+fname+'[/COLOR]】(按此选择)')
    u = sys.argv[0]+"?mode=13&name="+urllib.quote_plus(name)+"&url="+urllib.quote_plus(url)+"&area="+area+"&artist="+artist+"&area="+area+"&version="+version+"&tag="+urllib.quote(tag)+"&genre="+genre+"&fname="+urllib.quote(fname)+"&order="+"&page="+str(page)+"&listpage="+urllib.quote_plus(listpage)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(url)
    if link == None: return
    matchs=re.compile('<div id="mvlist".+?class="mv_list_vertical">(.+?)</ul>').findall(link)
    matchli=re.compile('<li>(.+?)</li>').findall(matchs[0])
    totalItems=len(matchli)
    if totalItems == 0:
        # No hits for this filter combination: show an apology row instead.
        li=xbmcgui.ListItem('[COLOR FFFF0000]非常抱歉 ![/COLOR] 您选择的查询条件暂无结果')
        xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True)
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
    else:
        playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
        playlist.clear()
        j=0
        for item in matchli:
            item = item.replace('\\"','\'')
            match=re.compile('<a href="(.+?)" target="_blank">').findall(item)
            #p_url = 'http://www.yinyuetai.com' + match[0]
            p_url = match[0]
            match=re.compile('<img src="(.+?)" alt="(.+?)"').findall(item)
            p_thumb = match[0][0]
            p_thumb += '|Referer=http://www.yinyuetai.com'
            p_name = match[0][1]
            p_artist=''
            match=re.compile('target="_blank" class="c3" title="(.+?)">').findall(item)
            if len(match): p_artist = match[0]
            j +=1
            p_list = str(j)+'. '+p_name
            if p_artist: p_list+=' ['+p_artist +']'
            li = xbmcgui.ListItem(p_list, iconImage = '', thumbnailImage = p_thumb)
            li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
            u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
            playlist.add(p_url, li)
        # Fetch and build page selection menu
        matchp=re.compile('<div class="page-nav"(.+?)</div>').findall(link)
        if len(matchp):
            matchp1=re.compile('<a href=".+?>([0-9]+)</a>', re.DOTALL).findall(matchp[0])
            plist=[str(page)]
            for num in matchp1:
                # Skip duplicates and the page currently shown.
                if num not in plist:
                    plist.append(num)
                    li = xbmcgui.ListItem("... 第" + num + "页")
                    u = sys.argv[0]+"?mode=3&name="+urllib.quote_plus(name)+"&url="+urllib.quote_plus(url)+"&area="+area+"&artist="+artist+"&area="+area+"&version="+version+"&tag="+urllib.quote(tag)+"&genre="+genre+"&fname="+fname+"&order="+"&page="+num+"&listpage="+urllib.quote_plus(listpage)
                    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
        xbmcplugin.setContent(int(sys.argv[1]), 'movies')
        xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
# - 按类型 (Categories)
# - 按地区 (Areas)
# - 按年份 (Year)
# - 排序方式 (Selection Order) etc
##################################################################################
def performChangesAllMV(name,url,area,artist,version,tag, genre,fname,order,page,listpage):
    """Walk the user through one dialog per MV filter (area, artist type,
    version, tag, genre, then artist-name initial) and relist page 1 via
    listAllMV() if anything changed.  Cancelling a dialog keeps that
    filter's current value; empty filter lists are skipped.

    Refactor note: the original repeated the same select-dialog stanza six
    times; a shared helper now drives the five list-backed filters.
    """
    dialog = xbmcgui.Dialog()
    arealist, artistlist, versionlist, taglist, genrelist = getListMV(listpage)
    change = [False]  # mutable flag shared with the helper below

    def _select(title, options, current):
        # Show a single-choice dialog over *options* ([id, label] pairs);
        # return the picked label, or *current* on cancel / empty list.
        if len(options) == 0:
            return current
        sel = dialog.select(title, [x[1] for x in options])
        if sel == -1:
            return current
        change[0] = True
        return options[sel][1]

    area = _select('艺人地区', arealist, area)
    artist = _select('艺人类别', artistlist, artist)
    version = _select('视频分类', versionlist, version)
    tag = _select('视频标签', taglist, tag)
    genre = _select('艺人流派', genrelist, genre)

    # Artist-name initial: '全部' (all) followed by the letters A-Z.
    letters = ['全部'] + [chr(i) for i in range(ord('A'), ord('Z') + 1)]
    sel = dialog.select('姓', letters)
    if sel != -1:
        fname = letters[sel]
        change[0] = True

    if change[0]:
        listAllMV(name, url, area, artist, version, tag, genre, fname, order, '1', listpage)
##################################################################################
# http://www.yinyuetai.com/lookVideo-area/ML/4
# http://www.yinyuetai.com/mv/include/recommend-list?area=ML&page=1&pageType=page
# http://mv.yinyuetai.com/ajax/recommend-list?page=1
##################################################################################
def listRecommendMV(name, page):
    """Build the XBMC listing for the recommended-MV ajax feed.

    *page* is a 1-based page number as a string (defaults to '1').  Shows a
    header row (current page), one playable row per video (also queued on
    playlist 0), and links to every other page of results.
    """
    if page == None: page ='1'
    p_url = "http://mv.yinyuetai.com/ajax/recommend-list?page="
    url = p_url + page
    # Fetch & build video titles list for user selection, highlight user selected filter
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR]【[COLOR FF00FF00]Page: '+page+'[/COLOR]】')
    u = sys.argv[0] + "?mode=4&name="+urllib.quote_plus(name)+"&page="+page
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(url)
    if link == None: return
    playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
    playlist.clear()
    # fetch and build the video series episode list
    content = simplejson.loads(link)
    vlist = content['output']
    totalItems = len(vlist)
    for i in range(0, totalItems):
        vid = str(vlist[i]['id'])
        v_url = 'http://www.yinyuetai.com/video/' + vid
        p_thumb = vlist[i]['bigHeadImg']
        p_title = vlist[i]['filterTitle'].encode('utf-8')
        p_artist = vlist[i]['artistName'].encode('utf-8')
        p_list = p_name = str(i+1) + '. ' + p_title
        p_list += ' [COLOR FF00FFFF][' + p_artist + '][/COLOR]'
        li = xbmcgui.ListItem(p_list, iconImage='', thumbnailImage=p_thumb)
        li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
        u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(v_url)+"&thumb="+urllib.quote_plus(p_thumb)
        xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
        playlist.add(v_url, li)
    # Fetch and build page selection menu
    p_itemCount= content['count']
    p_pageNum = content['pageNum']
    p_pageSize = content['pageSize']
    # Ceiling division: the original truncating division (count / pageSize)
    # dropped the final partial page whenever count was not an exact
    # multiple of pageSize.
    p_pageTotal = (p_itemCount + p_pageSize - 1) // p_pageSize
    for num in range(p_pageTotal):
        page = num + 1
        if (page) != p_pageNum:
            li = xbmcgui.ListItem("... 第" + str(page) + "页")
            u = sys.argv[0] + "?mode=4&name="+urllib.quote_plus(name)+"&page="+str(page)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangesMV(name,area,page):
    """Dialog flow for the recommended-MV listing: pick area then date range.

    NOTE(review): the final call passes (name, area, page), but
    listRecommendMV() is defined with only (name, page) -- taking this path
    raises TypeError, and the chosen MVR_DATE label is not a numeric page
    string anyway.  Looks written against an older listRecommendMV API.
    """
    change = False
    dialog = xbmcgui.Dialog()
    list = [x[1] for x in MVR_LIST]
    sel = dialog.select('地区', list)
    if sel != -1:
        area = MVR_LIST[sel][1]
        change = True
    list = [x[1] for x in MVR_DATE]
    sel = dialog.select('日期', list)
    if sel != -1:
        page = MVR_DATE[sel][1]
        change = True
    if change: listRecommendMV(name,area,page)
##################################################################################
# http://www.yinyuetai.com/pl/playlist_newRecommend/all
##################################################################################
def listFavouriteMV(name,cat,order,page):
    """Build the XBMC listing for a playlist ('悦单') category page.

    'Hot' categories (label contains 热门) take an extra MVO_LIST sort-order
    path segment; the rest use a simple ?page= query.  Shows a header row
    (mode 15 filter dialog), one playable row per playlist entry (also
    queued on playlist 0), and page links parsed from the site's pager.
    """
    # fetch user specified parameters
    if cat == None: cat = '最新推荐'
    fltrCat = fetchID(MVF_LIST, cat)
    if order == None:
        order = '全部热门'
    fltrOrder = fetchID(MVO_LIST, order)
    if page is None: page = 1
    if re.search('热门',cat):
        url = 'http://pl.yinyuetai.com/playlist_'+fltrCat+'/'+fltrOrder+'/'+str(page)
        li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR](第'+str(page)+'页)【[COLOR FFFF0000]'+cat+'[/COLOR]/[COLOR FF00FF00]'+order+'[/COLOR]】(按此选择)')
    else:
        url = 'http://pl.yinyuetai.com/playlist_'+fltrCat+'?page='+str(page)
        li = xbmcgui.ListItem('[COLOR FF00FFFF]'+name+'[/COLOR](第'+str(page)+'页)【[COLOR FF00FF00]'+cat+'[/COLOR]】(按此选择)')
    # Fetch & build video titles list for user selection, highlight user selected filter
    u = sys.argv[0] + "?mode=15&name="+urllib.quote_plus(name)+"&cat="+cat+"&order="+order+"&page="+str(page)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(url)
    if link == None: return
    matchs=re.compile('<div id="main">(.+?)</ul>').findall(link)
    matchli=re.compile('<div class="thumb_box">(.+?)</li>').findall(matchs[0])
    if len(matchli):
        totalItems=len(matchli)
        playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
        playlist.clear()
        j=0
        for item in matchli:
            match=re.compile('<a href="(.+?)" target="_blank" title="(.+?)">[\s]*<img src="(.+?)"').findall(item)
            #p_url = 'http://www.yinyuetai.com' + match[0][0]
            p_url = match[0][0]
            p_name = match[0][1]
            # Fix: unescape HTML entities in the title.  The original line
            # read replace("<", "<").replace(">", ">") -- a no-op produced
            # when the source's "&lt;"/"&gt;" literals were entity-decoded.
            p_name = p_name.replace("&lt;", "<").replace("&gt;", ">")
            p_thumb = match[0][2]
            p_thumb += '|Referer=http://www.yinyuetai.com'
            p_artist=''
            match=re.compile('target="_blank">(.+?)</a>:').findall(item)
            if len(match): p_artist = match[0]
            j+=1
            p_list = str(j)+'. '+p_name
            if p_artist: p_list+=' ['+p_artist +']'
            li = xbmcgui.ListItem(p_list, iconImage = '', thumbnailImage = p_thumb)
            li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
            u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
            playlist.add(p_url, li)
        # Fetch and build page selection menu
        matchp=re.compile('<div class="page-nav">(.+?)</div>').findall(link)
        if len(matchp):
            matchp1=re.compile('<a href=".+?>([0-9]+)</a>', re.DOTALL).findall(matchp[0])
            plist=[str(page)]
            for num in matchp1:
                if num not in plist:
                    plist.append(num)
                    li = xbmcgui.ListItem("... 第" + num + "页")
                    u = sys.argv[0] + "?mode=5&name="+urllib.quote_plus(name)+"&cat="+cat+"&order="+order+"&page="+num
                    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangeFavourite(name,cat,order,page):
    """Show the 悦单 category picker (and, for "热门" categories, a sort-order
    picker) and re-render the favourites listing from page 1 on any change."""
    dialog = xbmcgui.Dialog()
    changed = False
    choice = dialog.select('悦单', [row[1] for row in MVF_LIST])
    if choice != -1:
        cat = MVF_LIST[choice][1]
        changed = True
    # The sort order only applies to the "热门" (hot) categories.
    if re.search('热门', cat):
        choice = dialog.select('排序方式', [row[1] for row in MVO_LIST])
        if choice != -1:
            order = MVO_LIST[choice][1]
            changed = True
    if changed:
        listFavouriteMV(name, cat, order, 1)
##################################################################################
# http://www.yinyuetai.com/fanAll?area=ML&property=Girl&enName=F&page=1
##################################################################################
def listArtist(name,area,geshou,fname,page):
    """Render one page of the artist directory (mode 6).

    area/geshou/fname/page come from the plugin URL and may be None; the
    "all" defaults are applied below.  The first row links to mode 16 (the
    filter picker), each artist row to mode 7 (that artist's MVs), and the
    pagination rows re-enter mode 6.
    """
    # fetch user specified parameters
    if area == None: area = '全部地区'
    fltrArea = fetchID(AREA_LIST, area)
    if geshou == None:
        geshou = '全部歌手'
    fltrGeshou = fetchID(GS_LIST, geshou)
    if fname == None: fname = '全部'
    if page is None: page = 1
    # Fetch & build video titles list for user selection, highlight user selected filter
    url = 'http://www.yinyuetai.com/fanAll?area='+fltrArea+'&property='+fltrGeshou
    if fname != '全部':   # FIX: was the Py2-only `<>` operator
        url += '&enName='+fname
    url += '&page='+str(page)
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+__addonname__+'[/COLOR](第'+str(page)+'页)【[COLOR FFFF0000]'+area+'[/COLOR]/[COLOR FF00FF00]'+geshou+'[/COLOR]/[COLOR FFFF5555]姓:'+fname+'[/COLOR]】(按此选择)')
    u = sys.argv[0]+"?mode=16&name="+urllib.quote_plus(name)+"&area="+urllib.quote_plus(area)+"&geshou="+urllib.quote_plus(geshou)+"&fname="+fname+"&page="+str(page)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(url)
    if link == None: return
    match=re.compile('<span class="groupcover"(.+?)</li>').findall(link)
    if len(match):
        totalItems = len(match)
        for i in range(0, len(match)):
            match1 = re.compile('fanid=.+?<a href="(.+?)"').findall(match[i])
            p_url1 = match1[0]
            artistid = p_url1.split('/')[2]
            p_url = 'http://www.yinyuetai.com/fanclub/mv-all/'+artistid+'/toNew'
            match1 = re.compile('<img.+?src="(.+?)"/>').findall(match[i])
            if match1:
                p_thumb = match1[0]
                p_thumb += '|Referer=http://www.yinyuetai.com'
            else: p_thumb =''
            match1 = re.compile('<div class="info">.+?<a href="(.+?)"').findall(match[i])
            p_url2 = match1[0]
            match1 = re.compile('class="song" title="(.+?)">').findall(match[i])
            p_name = match1[0]
            p_list = str(i+1)+'. '+p_name
            p_name += ' [[COLOR FFFF5555]'+area+'[/COLOR]/[COLOR FF5555FF]'+geshou+'[/COLOR]]'
            li = xbmcgui.ListItem(p_list, iconImage = '', thumbnailImage = p_thumb)
            u = sys.argv[0]+"?mode=7"+"&name="+urllib.quote_plus(p_name)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(p_thumb)+"&page=1"
            li.setInfo(type = "Video", infoLabels = {"Title":p_name})
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
    # Pagination menu.  BUG FIX: the page links previously carried `p_name`
    # (the last artist name from the loop above — undefined when no artist
    # matched, raising NameError).  They must carry the menu title `name`,
    # exactly like the mode=16 link above and the sibling listing functions.
    matchp=re.compile('<div class="page-nav">(.+?)</div>').findall(link)
    if len(matchp):
        matchp1=re.compile('<a href=".+?>([0-9]+)</a>', re.DOTALL).findall(matchp[0])
        plist=[str(page)]
        for num in matchp1:
            if num not in plist:
                plist.append(num)
                li = xbmcgui.ListItem("... 第" + num + "页")
                u = sys.argv[0]+"?mode=6"+"&name="+urllib.quote_plus(name)+"&area="+urllib.quote_plus(area)+"&geshou="+urllib.quote_plus(geshou)+"&fname="+fname+"&page="+num
                xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# http://www.yinyuetai.com/fanAll?area=ML&property=Girl&page=1
# http://www.yinyuetai.com/fanclub/27
##################################################################################
def listArtistMV(name,url,thumb,page):
    """Render one page of a single artist's MVs (mode 7).

    `url` is the artist's mv-all base URL; each MV row links to mode 10
    (playback) and is also queued on the Music playlist, which playVideo
    later consumes for continuous playback.
    """
    # fetch user specified parameters
    if page is None: page = 1
    p_url = url+'/?page='+str(page)
    li = xbmcgui.ListItem('[COLOR FF00FFFF]'+__addonname__+'[/COLOR](第'+str(page)+'页)【[COLOR FF00FF00]'+name+'[/COLOR]】')
    # Fetch & build video titles list for user selection, highlight user selected filter
    u = sys.argv[0]+"?mode=7&name="+urllib.quote_plus(name)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(thumb)+"&page="+str(page)
    xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    link=getHttpData(p_url)
    if link == None: return
    # Scrape title / relative link / thumbnail for every MV on the page.
    vlist=re.compile('<div class="mv_list"><ul>(.+?)</ul></div>').findall(link)
    match=re.compile('<div class="thumb"><a target="_blank" title="(.+?)" href="(.+?)"><img.+?src="(.+?)"').findall(vlist[0])
    if len(match):
        totalItems=len(match)
        playlist=xbmc.PlayList(0) # use Music playlist for temporary storage
        playlist.clear()
        j=0
        artist=re.compile('<h1>(.+?)</h1>').findall(link)
        p_artist = artist[0]
        for p_name,p_url,p_thumb in match:
            p_url = 'http://www.yinyuetai.com' + p_url
            j+=1
            p_list = str(j)+'. '+p_name+' ['+p_artist +']'
            #p_thumb += '|User-Agent='+UserAgent
            p_thumb += '|Referer=http://www.yinyuetai.com'
            li = xbmcgui.ListItem(p_list, iconImage = '', thumbnailImage = p_thumb)
            li.setInfo(type = "Video", infoLabels = {"Title":p_list, "Artist":p_artist.split(',')})
            u = sys.argv[0]+"?mode=10"+"&name="+urllib.quote_plus(p_list)+"&url="+urllib.quote_plus(p_url)+"&thumb="+urllib.quote_plus(p_thumb)
            xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
            playlist.add(p_url, li)
    # Fetch and build page selection menu
    matchp=re.compile('<div class="page-nav">(.+?)</div>').findall(link)
    if len(matchp):
        matchp1=re.compile('<a href=".+?>([0-9]+)</a>', re.DOTALL).findall(matchp[0])
        plist=[str(page)]                  # current page: excluded from the menu
        for num in matchp1:
            if num not in plist:
                plist.append(num)
                li = xbmcgui.ListItem("... 第" + num + "页")
                u = sys.argv[0]+"?mode=7"+"&name="+urllib.quote_plus(name)+"&url="+urllib.quote_plus(url)+"&thumb="+urllib.quote_plus(thumb)+"&page="+num
                xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True)
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
##################################################################################
# Routine to update video list as per user selected filters
##################################################################################
def performChangeGs(name,area,geshou,fname,page):
    """Show area / artist-type / surname-initial pickers for the artist
    directory and re-render it from page 1 when anything changed."""
    dialog = xbmcgui.Dialog()
    changed = False
    choice = dialog.select('地区', [row[1] for row in AREA_LIST])
    if choice != -1:
        area = AREA_LIST[choice][1]
        changed = True
    choice = dialog.select('歌手', [row[1] for row in GS_LIST])
    if choice != -1:
        geshou = GS_LIST[choice][1]
        changed = True
    # Surname initial: '全部' (all) followed by the letters A..Z.
    letters = ['全部'] + [chr(c) for c in xrange(ord('A'), ord('Z')+1)]
    choice = dialog.select('姓', letters)
    if choice != -1:
        fname = letters[choice]
        changed = True
    if changed:
        listArtist(name, area, geshou, fname, 1)
##################################################################################
# http://hc.yinyuetai.com/uploads/videos/common/D15E013E4B0CA991DBBD9FCFDECDE167.flv?sc=441d14c6ded1de37&br=776&ptp=mv&rd=yinyuetai.com&json=1
##################################################################################
def get_vurl(url):
    """Resolve a yinyuetai playlist URL to a direct video URL.

    Falls back to returning the original `url` unchanged whenever any step
    of the resolution fails, so callers can always play the return value.
    """
    link=getHttpData(url)
    if link == None:
        return url
    # First JSON hop: the playlist page embeds [{"videoUrl": ...}].
    match=re.compile('\[\{"videoUrl":"(.+?)"').findall(link)
    if len(match):
        # The embedded URL escapes '=' as the JSON sequence \u003d.
        purl = match[0].replace("\\u003d", '=') + "&br=684&ptp=mv&rd=yinyuetai.com&json=1"
        link=getHttpData(purl)
        if link == None:
            return url
        else:
            # Second hop returns {"url": ...}; undo the (double-escaped)
            # \u003d sequences there too.  NOTE(review): in this Py2 byte
            # string '\u003d' is the literal 7 characters, so the pattern's
            # exact escaping is fragile — verify before touching it.
            matchv=re.compile('"url":"(.+?)"').findall(link)
            vurl = re.sub('\\\\\\\\\u003d', '=', matchv[0])
            return vurl
    else:
        return url
##################################################################################
# Continuous Player start playback from user selected video
# User backspace to previous menu will not work - playlist = last selected
##################################################################################
def playVideo(name,url,thumb):
videoplaycont = __addon__.getSetting('video_vplaycont')
playlistA=xbmc.PlayList(0)
playlist=xbmc.PlayList(1)
playlist.clear()
v_pos = int(name.split('.')[0])-1
psize = playlistA.size()
ERR_MAX = psize-1
TRIAL = 1
errcnt = 0
k=0
pDialog = xbmcgui.DialogProgress()
ret = pDialog.create('匹配视频', '请耐心等候! 尝试匹配视频文件 ...')
pDialog.update(0)
for x in range(psize):
# abort if 5 or more access failures and no video playback
if (errcnt >= ERR_MAX and k == 0):
pDialog.close()
dialog = xbmcgui.Dialog()
ok = dialog.ok(__addonname__, '无法播放:多次未匹配到视频文件,请选择其它视频')
break
if x < v_pos: continue
p_item=playlistA.__getitem__(x)
p_url=p_item.getfilename(x)
p_list =p_item.getdescription(x)
# li = xbmcgui.ListItem(p_list)
li = p_item # pass all li items including the embedded thumb image
li.setInfo(type = "Video", infoLabels = {"Title":p_list})
if (pDialog.iscanceled()):
pDialog.close()
x = psize # quickily terminate any old thread
err_cnt = 0
return
pDialog.update(errcnt*100/ERR_MAX + 100/ERR_MAX/TRIAL)
if re.search('http://www.yinyuetai.com/', p_url) or re.search('http://v.yinyuetai.com/video/', p_url):
v_url = get_flv_url(p_url)
if len == None:
errcnt += 1 # increment consequetive unsuccessful access
print "error cnt: " + str(errcnt)
continue
playlistA.remove(p_url) # remove old url
playlistA.add(v_url, li, x) # keep a copy of v_url in Audio Playlist
elif re.search('http://v.yinyuetai.com/playlist', p_url):
v_url = get_vurl(p_url)
if v_url == None:
errcnt += 1 # increment consequetive unsuccessful access
continue
playlistA.remove(p_url) # remove old url
playlistA.add(v_url, li, x) # keep a copy of v_url in Audio Playlist
else:
v_url = p_url
err_cnt = 0 # reset error count
playlist.add(v_url, li, k)
k +=1
if k == 1:
pDialog.close()
xbmc.Player(1).play(playlist)
if videoplaycont == 'false': break
##################################################################################
def playVideoX(name,url,thumb):
    """Resolve and play a single MV; on failure, tell the user why.

    BUG FIX: the failure branch referenced a variable `link` that was never
    defined in this function, so every failed resolution raised NameError
    instead of showing a dialog.  The page is now fetched here so the
    original diagnostic messages can actually be shown.
    """
    v_url = get_flv_url(url)
    if v_url:
        playlist=xbmc.PlayList(1)
        playlist.clear()
        listitem = xbmcgui.ListItem(name, thumbnailImage = thumb)
        listitem.setInfo(type="Video",infoLabels={"Title":name})
        playlist.add(v_url, listitem)
        xbmc.Player().play(playlist)
    else:
        # Fetch the page to classify the failure (encrypted vs. paid/deleted).
        link = getHttpData(url)
        if link and link.find('该视频为加密视频')>0:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok(__addonname__, '无法播放:该视频为加密视频')
        elif link and link.find('解析失败,请确认视频是否被删除')>0:
            dialog = xbmcgui.Dialog()
            ok = dialog.ok(__addonname__, '无法播放:该视频或为收费节目')
##################################################################################
def get_params():
    """Parse the plugin query string (sys.argv[2]) into a dict.

    Returns {'key': 'value', ...} for a non-empty query string, or the
    (historical) empty list [] when there is nothing to parse — callers
    access the result inside try/except, so both shapes are safe.

    BUG FIX: the original stripped the trailing '/' from an unused copy of
    the string (and removed two characters instead of one), so values like
    "page=2/" kept the slash.  The strip now happens on the string that is
    actually split.
    """
    param = []
    paramstring = sys.argv[2]
    if len(paramstring) >= 2:
        # Drop the leading '?' and any trailing '/'.
        cleanedparams = paramstring.replace('?', '')
        if cleanedparams.endswith('/'):
            cleanedparams = cleanedparams[:-1]
        param = {}
        for pair in cleanedparams.split('&'):
            kv = pair.split('=')
            if len(kv) == 2:
                param[kv[0]] = kv[1]
    return param
##################################################################################
params=get_params()

# Every recognised query-string parameter becomes a module-level variable
# (None when absent) that the exec'd dispatch expression below reads.  This
# loop replaces seventeen copy-pasted try/except blocks with identical
# behavior: unquote when present, None otherwise.
_PARAM_NAMES = ('url', 'name', 'area', 'artist', 'version', 'tag', 'genre',
                'geshou', 'cat', 'fname', 'order', 'date', 'page', 'thumb',
                'listpage', 'timelist')
for _pname in _PARAM_NAMES:
    try:
        globals()[_pname] = urllib.unquote_plus(params[_pname])
    except:
        globals()[_pname] = None
# mode is the only integer parameter.
mode = None
try:
    mode = int(params["mode"])
except:
    pass
# Dispatch table: mode -> (expression to exec, menu title, url fragment, isdir).
# Only element [0] is used here; the rest is consumed by MainMenu.
ctl = {
      None : ('MainMenu(ctl)','音悦台MV',(2,8,1,3,4,5,6)),
      1 : ('listVChart(name,area,date,timelist)','音悦台 - 音悦V榜','',True),
      2 : ('listFocusMV(name,url,cat)','音悦台 - MV首播','/ajax/shoubo?area=',True),
      3 : ('listAllMV(name,url,area,artist,version,tag, genre,fname,order,page,listpage)','音悦台 - 全部MV','/mv/all',True),
      4 : ('listRecommendMV(name, page)','音悦台 - 推荐MV','/ajax/recommend-list?page=1',True),
      5 : ('listFavouriteMV(name,cat,order,page)','音悦台 - 全部悦单','/pl/playlist_newRecommend',True),
      6 : ('listArtist(name,area,geshou,fname,page)','音悦台 - 歌手','/fanAll',True),
      7 : ('listArtistMV(name,url,thumb,page)','显示歌手MV','/fanAll',True),
      8 : ('listFocusMV(name,url,cat)','音悦台 - 正在流行','/ajax/zhengliuxing?area=',True),
      9 : ('listFocusMV(name,url,cat)','音悦台 - V榜','/ajax/vchart?area=',True),
      10 : ('playVideo(name,url,thumb)',''),
      11 : ('performChangeVChart(name,area,date,timelist)',''),
      12 : ('performChangeFocus(name,url,cat)',''),
      13 : ('performChangesAllMV(name,url,area,artist,version,tag, genre,fname,order,page,listpage)',''),
      14 : ('performChangesMV(name,area,page)',''),
      15 : ('performChangeFavourite(name,cat,order,page)',''),
      16 : ('performChangeGs(name,area,geshou,fname,page)','')
      }
# exec only ever runs a fixed expression from the table above, selected by
# the (validated-by-lookup) mode key — never raw user input.
exec(ctl[mode][0])
| gpl-2.0 |
orb-framework/orb | tests/functional/mysql/test_my_api.py | 2 | 11003 | import pytest
def test_my_api_select_bob(orb, my_sql, my_db, User):
    # Basic select: a Query equality filter on a column returns the record.
    record = User.select(where=orb.Query('username') == 'bob').first()
    assert record is not None and record.get('username') == 'bob'
def test_my_api_save_bill(orb, my_db, User):
    # Saving a new instance assigns an id, marks it as a record, and fills
    # in the default user_type foreign key.
    user = User({
        'username': 'bill',
        'password': 'T3st1ng!'
    })
    user.save()
    assert user.id() is not None
    assert user.isRecord() == True
    assert user.get('user_type_id') == 1
    # Dotted shortcut syntax traverses the reference to the user_type row.
    assert user.get('user_type.code') == 'basic'
def test_my_api_fetch_bill(orb, my_db, User):
user = User.byUsername('bill')
assert user is not None
id = user.id()
user = User(id)
assert user is not None
user = User.fetch(id)
assert user is not None
def test_my_api_delete_bill(orb, my_db, User):
user = User.byUsername('bill')
assert user and user.isRecord()
user.delete()
assert not user.isRecord()
user_again = User.byUsername('bill')
assert user_again is None
def test_my_api_update_bob(orb, my_sql, my_db, User):
record = User.select(where=orb.Query('username') == 'bob').first()
assert record is not None
assert record.get('username') == 'bob'
st = my_sql.statement('UPDATE')
conn = my_db.connection()
# set to tim
record.set('username', 'tim')
sql, data = st([record])
result, count = conn.execute(sql, data)
record_tim = User.select(where=orb.Query('username') == 'tim').first()
assert record_tim is not None
assert record_tim.id() == record.id()
# set back to bob
record_tim.set('username', 'bob')
sql, data = st([record_tim])
result, count = conn.execute(sql, data)
record_bob = User.select(where=orb.Query('username') == 'bob').first()
assert record_bob is not None
assert record_bob.id() == record.id() and record_bob.id() == record_tim.id()
def test_my_api_create_admins(orb, User, GroupUser, Group):
user = User.byUsername('bob')
assert user is not None and user.get('username') == 'bob'
group = Group.ensureExists({'name': 'admins'})
assert group is not None
group_user = GroupUser.ensureExists({'group': group, 'user': user})
assert group_user.isRecord() == True
def test_my_api_get_user_groups(orb, User):
user = User.byUsername('bob')
assert user is not None
groups = user.get('groups')
assert len(groups) == 1
def test_my_api_get_group_users(orb, Group):
grp = Group.select(where=orb.Query('name') == 'admins').first()
assert grp is not None and grp.get('name') == 'admins'
users = grp.get('users')
assert len(users) == 1
assert users[0].get('username') == 'bob'
def test_my_api_get_group_users_reverse(orb, User, Group):
bob = User.byUsername('bob')
assert len(bob.get('userGroups')) == 1
admins = Group.byName('admins')
assert len(admins.get('groupUsers')) == 1
def test_my_api_get_group_users_by_unique_index(orb, GroupUser, User, Group):
u = User.byUsername('bob')
g = Group.byName('admins')
admin = GroupUser.byUserAndGroup(u, g)
assert admin is not None
def test_my_api_get_group_users_by_index(orb, GroupUser, User):
u = User.byUsername('bob')
users = GroupUser.byUser(u)
assert len(users) == 1
assert users[0].user() == u
def test_my_api_select_with_join(orb, Group, User, GroupUser):
q = orb.Query('id') == orb.Query(GroupUser, 'user')
q &= orb.Query(GroupUser, 'group') == orb.Query(Group, 'id')
q &= orb.Query(Group, 'name') == 'admins'
records = User.select(where=q)
assert len(records) == 1
assert records[0].get('username') == 'bob'
def test_my_api_select_standard_with_shortcut(orb, GroupUser):
q = orb.Query('group.name') == 'admins'
records = GroupUser.select(where=q)
assert len(records) == 1
assert records[0].get('user.username') == 'bob'
def test_my_api_select_reverse_with_shortcut(orb, User):
q = orb.Query('userGroups.group.name') == 'admins'
records = User.select(where=q)
assert len(records) == 1
assert records[0].get('username') == 'bob'
def test_my_api_select_pipe_with_shortcut(orb, User):
q = orb.Query('groups.name') == 'admins'
records = User.select(where=q)
assert len(records) == 1
assert records[0].get('username') == 'bob'
def test_my_api_expand(orb, GroupUser):
group_user = GroupUser.select(expand='user').first()
assert group_user is not None
def test_my_api_expand_pipe(orb, User):
groups = User.byUsername('bob', expand='groups').get('groups')
assert len(groups) == 1
for group in groups:
assert group.id() is not None
def test_my_api_expand_lookup(orb, User):
userGroups = User.byUsername('bob', expand='userGroups').get('userGroups')
assert len(userGroups) == 1
for userGroup in userGroups:
assert userGroup.get('user_id') is not None
def test_my_api_expand_json(orb, GroupUser):
group_user = GroupUser.select(expand='user').first()
jdata = group_user.__json__()
assert jdata['user_id'] == jdata['user']['id']
def test_my_api_expand_complex_json(orb, User):
user = User.byUsername('bob', expand='groups,userGroups,userGroups.group')
jdata = user.__json__()
assert jdata['groups'][0]['name'] == 'admins'
assert jdata['userGroups'][0]['user_id'] == jdata['id']
assert jdata['userGroups'][0]['group']['name'] == 'admins'
def test_my_api_collection_insert(orb, Group):
records = orb.Collection((Group({'name': 'Test A'}), Group({'name': 'Test B'})))
records.save()
assert records[0].id() is not None
assert records[1].id() is not None
test_a = Group.byName('Test A')
test_b = Group.byName('Test B')
assert records[0].id() == test_a.id()
assert records[1].id() == test_b.id()
def test_my_api_collection_delete(orb, Group):
records = Group.select(where=orb.Query('name').in_(('Test A', 'Test B')))
assert len(records) == 2
assert records.delete() == 2
def test_my_api_collection_delete_empty(orb, User):
users = User.select(where=orb.Query('username') == 'missing')
assert users.delete() == 0
def test_my_api_collection_has_record(orb, User):
users = User.all()
assert users.has(User.byUsername('bob'))
def test_my_api_collection_iter(orb, User):
records = User.select()
for record in records:
assert record.isRecord()
def test_my_api_collection_invalid_index(orb, User):
records = User.select()
with pytest.raises(IndexError):
records[50]
def test_my_api_collection_ids(orb, User):
records = User.select().records(order='+id')
ids = User.select().ids(order='+id')
for i, record in enumerate(records):
assert record.id() == ids[i]
def test_my_api_collection_index(orb, User):
users = User.select()
urecords = users.records()
assert users.index(urecords[0]) == 0
assert users.index(None) == -1
with pytest.raises(ValueError):
assert users.index(User()) == -1
with pytest.raises(ValueError):
assert User.select().index(User())
def test_my_api_collection_loaded(orb, User):
users = orb.Collection(model=User)
assert not users.isLoaded()
assert not users.isNull()
null_users = orb.Collection()
assert null_users.isNull()
def test_my_api_collection_empty(orb, User):
users = orb.Collection()
assert users.isEmpty()
users = User.select(where=orb.Query('username') == 'billy')
assert users.isEmpty()
def test_my_api_collection_itertool(orb, User):
for user in User.select(inflated=False):
assert user['id'] is not None
def test_my_api_select_columns(orb, User):
data = User.select(columns='username', returning='values').records()
assert type(data) == list
assert 'bob' in data
assert 'sally' in data
def test_my_api_select_colunms_json(orb, User):
data = User.select(columns='username', returning='values').__json__()
assert type(data) == list
assert 'bob' in data
assert 'sally' in data
def test_my_api_select_multiple_columns(orb, User):
data = list(User.select(columns=['id', 'username'], returning='values'))
assert type(data) == list
assert type(data[0]) == tuple
assert (1, 'bob') in data
def test_my_api_save_multi_i18n(orb, Document):
doc = Document()
with orb.Context(locale='en_US'):
assert doc.context().locale == 'en_US'
doc.save({'title': 'Fast'})
with orb.Context(locale='es_ES'):
assert doc.context().locale == 'es_ES'
doc.set('title', 'Rapido')
doc.save()
def test_my_api_load_multi_i18n(orb, Document):
with orb.Context(locale='en_US'):
doc_en = Document.select().last()
with orb.Context(locale='es_ES'):
doc_sp = Document.select(locale='es_ES').last()
assert doc_en.get('title') == 'Fast'
assert doc_sp.get('title') == 'Rapido'
assert doc_en.id() == doc_sp.id()
def test_my_api_load_multi_i18n_with_search(orb, Document):
with orb.Context(locale='en_US'):
docs_en = Document.select(where=orb.Query('title') == 'Fast')
with orb.Context(locale='es_ES'):
docs_sp = Document.select(where=orb.Query('title') == 'Rapido')
assert len(docs_en) == len(docs_sp)
assert docs_en[0].get('title') == 'Fast'
assert docs_sp[0].get('title') == 'Rapido'
assert len(set(docs_sp.values('id')).difference(docs_en.values('id'))) == 0
def test_my_api_invalid_reference(orb, Employee, User):
user = User()
employee = Employee()
with pytest.raises(orb.errors.InvalidReference):
employee.set('role', user)
employee.validate(columns=['role'])
def test_my_api_save_employee(orb, Employee, Role):
# TODO: support inserting inherited records
pass
# role = Role.ensureExists({'name': 'Programmer'})
# sam = Employee.byUsername('samantha')
# if not sam:
# sam = Employee({
# 'username': 'samantha',
# 'password': 'T3st1ng!',
# 'role': role
# })
# sam.save()
#
# assert sam.get('username') == 'samantha'
# assert sam.get('role') == role
def test_my_api_save_hash_id(orb, Comment):
comment = Comment({'text': 'Testing'})
comment.save()
assert isinstance(comment.id(), unicode)
def test_my_api_restore_hash_id(orb, Comment):
comment = Comment.select().last()
assert isinstance(comment.id(), unicode)
def test_my_api_reference_hash_id(orb, Comment, Attachment):
comment = Comment.select().last()
attachment = Attachment({'filename': '/path/to/somewhere', 'comment': comment})
attachment.save()
assert isinstance(attachment.get('comment_id'), unicode)
def test_my_expand_virtual(orb, GroupUser, User):
gu = GroupUser.select().first().get('user')
u = User.select(where=orb.Query('id') == gu, expand='my_groups.users,groups.users').first()
json = u.__json__()
assert len(json['groups']) == len(json['my_groups'])
assert len(json['groups'][0]['users']) == len(json['my_groups'][0]['users'])
| mit |
olexiim/edx-platform | common/djangoapps/course_about/tests/test_views.py | 8 | 6520 | """
Tests for user enrollment.
"""
import ddt
import json
import unittest
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from django.conf import settings
from datetime import datetime
from mock import patch
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase, mixed_store_config
)
from xmodule.modulestore.tests.factories import CourseFactory, CourseAboutFactory
from student.tests.factories import UserFactory
from course_about.serializers import course_image_url
from course_about import api
from course_about.errors import CourseNotFoundError, CourseAboutError
from xmodule.modulestore.django import modulestore
# Since we don't need any XML course fixtures, use a modulestore configuration
# that disables the XML modulestore.
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@ddt.ddt
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseInfoTest(ModuleStoreTestCase, APITestCase):
    """
    Tests for the course-about REST endpoint ('courseabout' URL).

    Each test creates a fresh course and a logged-in user, then exercises
    the endpoint through the DRF test client via _get_course_about().
    """
    USERNAME = "Bob"
    EMAIL = "bob@example.com"
    PASSWORD = "edx"
    def setUp(self):
        """Create a course and a logged-in user for every test."""
        super(CourseInfoTest, self).setUp()
        self.course = CourseFactory.create()
        self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)
    def test_user_not_authenticated(self):
        # Log out, so we're no longer authenticated
        self.client.logout()
        # The about endpoint is public: anonymous requests still get 200.
        resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_200_OK)
        self.assertIsNotNone(resp_data)
    def test_with_valid_course_id(self):
        _resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_200_OK)
    def test_with_invalid_course_id(self):
        # A malformed course key must yield 404, not a server error.
        resp = self.client.get(
            reverse('courseabout', kwargs={"course_id": 'not/a/validkey'})
        )
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)
    def test_get_course_details_all_attributes(self):
        kwargs = dict()
        kwargs["course_id"] = self.course.id
        kwargs["course_runtime"] = self.course.runtime
        CourseAboutFactory.create(**kwargs)
        resp_data, status_code = self._get_course_about(self.course.id)
        # Every documented response attribute must appear in the payload.
        all_attributes = ['display_name', 'start', 'end', 'announcement', 'advertised_start', 'is_new', 'course_number',
                          'course_id',
                          'effort', 'media', 'course_image']
        for attr in all_attributes:
            self.assertIn(attr, str(resp_data))
        self.assertEqual(status_code, status.HTTP_200_OK)
    def test_get_course_about_valid_date(self):
        module_store = modulestore()
        self.course.start = datetime.now()
        self.course.end = datetime.now()
        self.course.announcement = datetime.now()
        module_store.update_item(self.course, self.user.id)
        resp_data, _status_code = self._get_course_about(self.course.id)
        self.assertIsNotNone(resp_data["start"])
        self.assertIsNotNone(resp_data["end"])
        self.assertIsNotNone(resp_data["announcement"])
    def test_get_course_about_none_date(self):
        # Unset dates must be serialized as nulls, not dropped or faked.
        module_store = modulestore()
        self.course.start = None
        self.course.end = None
        self.course.announcement = None
        module_store.update_item(self.course, self.user.id)
        resp_data, _status_code = self._get_course_about(self.course.id)
        self.assertIsNone(resp_data["start"])
        self.assertIsNone(resp_data["end"])
        self.assertIsNone(resp_data["announcement"])
    def test_get_course_details(self):
        kwargs = dict()
        kwargs["course_id"] = self.course.id
        kwargs["course_runtime"] = self.course.runtime
        kwargs["user_id"] = self.user.id
        CourseAboutFactory.create(**kwargs)
        resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_200_OK)
        self.assertEqual(unicode(self.course.id), resp_data['course_id'])
        self.assertIn('Run', resp_data['display_name'])
        url = course_image_url(self.course)
        self.assertEquals(url, resp_data['media']['course_image'])
    @patch.object(api, "get_course_about_details")
    def test_get_enrollment_course_not_found_error(self, mock_get_course_about_details):
        # A CourseNotFoundError from the api layer maps to HTTP 404.
        mock_get_course_about_details.side_effect = CourseNotFoundError("Something bad happened.")
        _resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_404_NOT_FOUND)
    @patch.object(api, "get_course_about_details")
    def test_get_enrollment_invalid_key_error(self, mock_get_course_about_details):
        mock_get_course_about_details.side_effect = CourseNotFoundError('a/a/a', "Something bad happened.")
        resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_404_NOT_FOUND)
        self.assertIn('An error occurred', resp_data["message"])
    @patch.object(api, "get_course_about_details")
    def test_get_enrollment_internal_error(self, mock_get_course_about_details):
        # A generic CourseAboutError maps to HTTP 500 with a message.
        mock_get_course_about_details.side_effect = CourseAboutError('error')
        resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
        self.assertIn('An error occurred', resp_data["message"])
    @override_settings(COURSE_ABOUT_DATA_API='foo')
    def test_data_api_config_error(self):
        # A bogus data-api setting surfaces as a 500 with an error message.
        resp_data, status_code = self._get_course_about(self.course.id)
        self.assertEqual(status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
        self.assertIn('An error occurred', resp_data["message"])
    def _get_course_about(self, course_id):
        """
        Hit the 'courseabout' endpoint for `course_id` and return the
        decoded JSON body together with the HTTP status code.
        """
        resp = self.client.get(
            reverse('courseabout', kwargs={"course_id": unicode(course_id)})
        )
        return json.loads(resp.content), resp.status_code
| agpl-3.0 |
coursera-dl/coursera-dl | coursera/parallel.py | 3 | 2039 | import abc
import logging
import traceback
from multiprocessing.dummy import Pool
class AbstractDownloader(object):
    """
    Base class for download wrappers. Two methods should be implemented:
    `download` and `join`.
    """
    # Python 2 style ABC declaration (Py3 would use metaclass=ABCMeta).
    __metaclass__ = abc.ABCMeta
    def __init__(self, file_downloader):
        # file_downloader: any object exposing download(url, *args, **kwargs).
        super(AbstractDownloader, self).__init__()
        self._file_downloader = file_downloader
    @abc.abstractmethod
    def download(self, *args, **kwargs):
        raise NotImplementedError()
    @abc.abstractmethod
    def join(self):
        raise NotImplementedError()
    def _download_wrapper(self, url, *args, **kwargs):
        """
        Actual download call. Calls the underlying file downloader,
        catches all exceptions and returns the result.

        Returns a (url, result) tuple; on failure `result` is the caught
        exception object rather than a raised error, so callers/callbacks
        always receive something to inspect.
        """
        try:
            return url, self._file_downloader.download(url, *args, **kwargs)
        except Exception as e:
            logging.error("AbstractDownloader: %s", traceback.format_exc())
            return url, e
class ConsecutiveDownloader(AbstractDownloader):
    """Synchronous downloader: every request runs immediately, one after
    another, on the caller's own thread."""

    def download(self, callback, url, *args, **kwargs):
        # Run the download right away, then hand the callback whatever came
        # back (a result object, or the exception the wrapper caught).
        downloaded_url, outcome = self._download_wrapper(url, *args, **kwargs)
        callback(downloaded_url, outcome)
        return outcome

    def join(self):
        # Nothing runs in the background, so there is nothing to wait for.
        pass
class ParallelDownloader(AbstractDownloader):
    """
    This class uses threading.Pool to run download requests in parallel.

    (Pool comes from multiprocessing.dummy, i.e. a thread pool, not
    separate processes.)
    """
    def __init__(self, file_downloader, processes=1):
        # processes: number of worker threads in the pool.
        super(ParallelDownloader, self).__init__(file_downloader)
        self._pool = Pool(processes=processes)
    def download(self, callback, url, *args, **kwargs):
        # The wrapper returns a (url, result) tuple; unpack it into the
        # two-argument callback signature used by ConsecutiveDownloader.
        callback_wrapper = lambda payload: callback(*payload)
        # Returns an AsyncResult; the callback fires on completion.
        return self._pool.apply_async(
            self._download_wrapper, (url,) + args, kwargs,
            callback=callback_wrapper)
    def join(self):
        # Stop accepting work, then block until all queued downloads finish.
        self._pool.close()
        self._pool.join()
| lgpl-3.0 |
bepatient-fr/ikaaro | ikaaro/emails/emails.py | 3 | 4797 | # -*- coding: UTF-8 -*-
# Copyright (C) 2011 Juan David Ibáñez Palomar <jdavid@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from itools
from itools.core import prototype
from itools.gettext import MSG
###########################################################################
# Base classes
###########################################################################
class Email(prototype):
    """Base prototype for site e-mails: a subject plus a text template."""

    subject = None
    text = None

    def get_text_namespace(self, context):
        """Build the substitution namespace shared by every e-mail body."""
        root_uri = str(context.uri.resolve('/'))
        return {
            'host': root_uri[:-1],  # drop the trailing slash
            'host_title': context.root.get_title(),
        }

    def get_text(self, context):
        """Render the body text with the namespace substitutions applied."""
        return self.text.gettext(**self.get_text_namespace(context))
# Registry
# Maps class_id -> Email prototype; filled in by register_email() below.
emails_registry = {}

# Public API
def register_email(cls):
    """Register *cls* in the global registry under its class_id."""
    emails_registry[cls.class_id] = cls
def send_email(email_id, context, to_addr, **kw):
    """Instantiate the e-mail registered as *email_id* and send it to *to_addr*.

    Unknown ids are silently ignored.
    """
    email_cls = emails_registry.get(email_id)
    if email_cls is None:
        return
    kw['to_addr'] = to_addr
    message = email_cls(**kw)
    body = message.get_text(context)
    context.root.send_email(to_addr, message.subject, text=body)
###########################################################################
# User email
###########################################################################
class User_Email(Email):
    """Base class for e-mails that concern a specific user account."""

    # The user resource this e-mail refers to (set by the caller).
    user = None

    def get_text_namespace(self, context):
        namespace = super(User_Email, self).get_text_namespace(context)
        # User specific information
        user = self.user
        if user:
            namespace['user'] = namespace['host'] + str(user.abspath)
            namespace['userid'] = user.get_login_name()
            user_state = user.get_property('user_state')
            # NOTE(review): 'key' is presumably the confirmation key stored on
            # the user_state property -- confirm against the User model.
            namespace['userkey'] = user_state.get_parameter('key')
        return namespace
class User_AskForConfirmation(User_Email):
    """This email asks the user to confirm his subscription to the web site.
    It is sent in two conditions:

    - When he registers through the registration form
    - When the administrator registers him through the back-office
    """

    class_id = 'user-ask-for-confirmation'
    subject = MSG(u'Registration confirmation')
    text = MSG(
        u'To confirm your identity, follow this link:\n'
        u'\n'
        u' {user}/;confirm_registration?username={userid}&key={userkey}')


class AddUser_SendNotification(User_Email):
    """Notifies a user registered by an administrator."""

    class_id = 'add-user-send-notification'
    subject = MSG(u'Registration notification')
    text = MSG(
        u'You have been registered to the "{host_title}" site:\n'
        u'\n'
        u' {host}/')


class Register_AlreadyRegistered(User_Email):
    """Sent when someone tries to register with an existing account."""

    class_id = 'register-already-registered'
    subject = MSG(u"Already registered")
    text = MSG(
        u'You already have an account:\n'
        u'\n'
        u' {host}/;login?loginname={userid}')


class Register_SendConfirmation(User_Email):
    """Confirms a completed registration."""

    class_id = 'register-send-confirmation'
    subject = MSG(u"Registration confirmed")
    text = MSG(
        u'You have been registered to the "{host_title}" site:\n'
        u'\n'
        u' {host}/')


class ForgottenPassword_AskForConfirmation(User_Email):
    """Sent on a forgotten-password request, with the reset link."""

    class_id = 'forgotten-password-ask-for-confirmation'
    subject = MSG(u"Choose a new password")
    text = MSG(
        u'To choose a new password, click the link:\n'
        u'\n'
        u' {user}/;change_password_forgotten?username={userid}&key={userkey}')


class SwitchState_Activate(User_Email):
    class_id = 'switch-state-activate'
    subject = MSG(u'Your account has been re-activated')
    text = MSG(u'Your account has been re-activated')


class SwitchState_Deactivate(User_Email):
    class_id = 'switch-state-deactivate'
    subject = MSG(u'Your account has been canceled')
    text = MSG(u'Your account has been canceled')


# Registry
register_email(User_AskForConfirmation)
register_email(AddUser_SendNotification)
register_email(Register_AlreadyRegistered)
# NOTE(review): Register_SendConfirmation is defined but not registered --
# confirm whether this is deliberate.
#register_email(Register_SendConfirmation)
register_email(ForgottenPassword_AskForConfirmation)
register_email(SwitchState_Activate)
register_email(SwitchState_Deactivate)
| gpl-3.0 |
matteo88/gasistafelice | gasistafelice/rest/views/blocks/gas_users.py | 3 | 4151 | from django.contrib.auth.models import User
from gasistafelice.rest.views.blocks import users
from flexi_auth.models import ParamRole
from gasistafelice.consts import GAS_MEMBER
from registration.models import RegistrationProfile
from gasistafelice.gas.forms.base import GASSingleUserForm
from django.forms.formsets import formset_factory
from gasistafelice.lib.formsets import BaseFormSetWithRequest
#------------------------------------------------------------------------------#
# #
#------------------------------------------------------------------------------#
class Block(users.Block):
    """Back-office block listing the users (GAS members) of one GAS resource."""

    BLOCK_NAME = "gas_users"
    BLOCK_VALID_RESOURCE_TYPES = ["gas"]

    COLUMN_INDEX_NAME_MAP = users.Block.COLUMN_INDEX_NAME_MAP
    # NOTE(review): the line above aliases (does not copy) the parent class
    # dict, so the assignment below also mutates
    # users.Block.COLUMN_INDEX_NAME_MAP -- confirm that side effect is wanted.
    COLUMN_INDEX_NAME_MAP[9] = 'person__gasmember__is_suspended'

    def _get_resource_list(self, request):
        """Retrieve all users who are GAS Members and have confirmed their emails.

        IMPORTANT: retrieve also suspended GAS Members.
        """
        # User list
        users_email_confirmed = User.objects.filter(registrationprofile__activation_key=RegistrationProfile.ACTIVATED)
        users = users_email_confirmed.filter(person__gasmember__gas=request.resource)
        # Annotate each row with the negated suspension flag of the membership.
        users = users.extra(select={"is_active_in_this_gas" : "NOT gas_gasmember.is_suspended" })
        # for u in users:
        #     print("%s is_active_in_this_gas=%s" % (u, u.is_active_in_this_gas))
        return users.order_by('last_name', 'first_name')

        # WAS: is a GASMember is suspended -> he has no role
        # WAS: pr = ParamRole.get_role(GAS_MEMBER, gas=request.resource)
        # WAS: users = pr.get_users()
        # WAS: return users.order_by('last_name', 'first_name')

    def _get_edit_multiple_form_class(self):
        """Build a formset class with one GASSingleUserForm per listed user."""
        qs = self._get_resource_list(self.request)
        return formset_factory(
            form=GASSingleUserForm,
            formset=BaseFormSetWithRequest,
            extra=qs.count() #0
        )

    def _get_records(self, request, querySet):
        """Return records of rendered table fields."""
        #TODO: refactoring needed with superclass method.
        data = {}
        i = 0
        c = querySet.count()
        # Maps user pk -> position of its form inside the formset.
        map_info = { }
        av = True  # NOTE(review): unused variable -- left untouched.

        for i,el in enumerate(querySet):
            key_prefix = 'form-%d' % i
            try:
                # Cache the related Person to avoid repeated ORM lookups below.
                el._cached_p = el.person
            except Person.DoesNotExist as e:
                # NOTE(review): Person is not imported in this module, so this
                # handler would itself raise NameError if it ever triggered --
                # confirm the missing import.
                el._cached_p = None
            data.update({
                '%s-id' % key_prefix : el.pk,
                '%s-pk' % key_prefix : el.pk,
                '%s-is_active' % key_prefix : bool(el.is_active),
                '%s-person' % key_prefix : el._cached_p,
                '%s-gm_is_active' % key_prefix : el.is_active_in_this_gas,
                '%s-initial_gm_is_active' % key_prefix : el.is_active_in_this_gas,
            })
            map_info[el.pk] = {'formset_index' : i}

        # Management-form bookkeeping required by Django formsets.
        data['form-TOTAL_FORMS'] = c
        data['form-INITIAL_FORMS'] = c
        data['form-MAX_NUM_FORMS'] = 0

        formset = self._get_edit_multiple_form_class()(request, data)

        records = []
        for i, el in enumerate(querySet):
            form = formset[map_info[el.pk]['formset_index']]
            if el._cached_p:
                person = el._cached_p
                person_urn = el._cached_p.urn
            else:
                # No bound Person: render the form field instead.
                person = form['person']
                person_urn = None

            records.append({
                'id' : "%s %s" % (form['pk'], form['id']),
                'username' : el.username,
                'first_name' : el.first_name,
                'last_name' : el.last_name,
                'email' : el.email,
                'last_login' : el.last_login,
                'date_joined' : el.date_joined,
                'is_active' : form['is_active'],
                'person' : person,
                'person_urn': person_urn,
                'is_active_in_this_gas': "%s %s" % (form['gm_is_active'], form['initial_gm_is_active'])
            })

        return formset, records, {}
| agpl-3.0 |
abacusresearch/gitflow | gitflow/procedures/scheme/scheme_procedures.py | 1 | 10745 | import os
from typing import Optional
import semver
from gitflow import _, version
from gitflow.common import Result
from gitflow.const import VersioningScheme
from gitflow.version import VersionConfig
def filter_sequence_number(version_config, prev_version, global_seq):
    """Validate and normalize the global sequence number for the scheme.

    For SEMVER_WITH_SEQ schemes, checks *prev_version*'s embedded sequence
    number against *global_seq* and returns a usable global sequence
    (defaulting to 0). For any other scheme, returns None.
    """
    if version_config.versioning_scheme != VersioningScheme.SEMVER_WITH_SEQ:
        # Sequence numbers only exist in the SEMVER_WITH_SEQ scheme.
        return None

    if prev_version is not None:
        prerelease = semver.parse_version_info(prev_version).prerelease
        if prerelease is not None:
            # must throw if not an integer
            version_seq = int(prerelease)
            if global_seq is None:
                raise ValueError('the version sequence number is defined, while the global one is not')
            if version_seq > global_seq:
                raise ValueError('the version sequence number is greater than the global sequence number')

    return 0 if global_seq is None else global_seq
def _bumped_version_string(version_config, version_info, global_seq):
    """Format *version_info* as the first pre-release of the new version.

    For SEMVER_WITH_SEQ schemes the pre-release part is the next global
    sequence number; otherwise it is '<first qualifier>.1'.
    """
    if version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ:
        prerelease = global_seq + 1
    else:
        prerelease = version_config.qualifiers[0] + ".1"
    return semver.format_version(
        version_info.major,
        version_info.minor,
        version_info.patch,
        prerelease,
        None)


def _version_bump_component(bump, version_config, version, global_seq):
    """Shared implementation behind version_bump_major/minor/patch.

    *bump* is one of semver.bump_major/bump_minor/bump_patch. Returns a
    Result whose value is the new version string, or which carries an
    error when the sequence-number validation fails. A None *version*
    is treated as "0.0.0".
    """
    result = Result()
    try:
        global_seq = filter_sequence_number(version_config, version, global_seq)
    except ValueError as e:
        result.error(os.EX_DATAERR, "version increment failed", str(e))
    if not result.has_errors():
        if version is not None:
            version_info = semver.parse_version_info(bump(version))
        else:
            version_info = semver.parse_version_info("0.0.0")
        result.value = _bumped_version_string(version_config, version_info, global_seq)
    return result


def version_bump_major(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Increment the major component (X.y.z -> X+1.0.0) as a first pre-release."""
    return _version_bump_component(semver.bump_major, version_config, version, global_seq)


def version_bump_minor(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Increment the minor component (x.Y.z -> x.Y+1.0) as a first pre-release."""
    return _version_bump_component(semver.bump_minor, version_config, version, global_seq)


def version_bump_patch(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Increment the patch component (x.y.Z -> x.y.Z+1) as a first pre-release."""
    return _version_bump_component(semver.bump_patch, version_config, version, global_seq)
def version_bump_qualifier(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Advance the pre-release qualifier of *version* to the next configured one.

    Returns a Result whose value is '<x.y.z>-<next qualifier>.1', or which
    carries an error when the scheme has no qualifiers, the version is not
    a pre-release, the current qualifier is unknown, or no higher
    qualifier exists.
    """
    result = Result()

    version_info = semver.parse_version_info(version) if version is not None else semver.parse_version_info("0.0.0")

    new_qualifier = None

    if not version_config.qualifiers:
        # Nothing to bump to: the scheme defines no qualifiers at all.
        result.error(os.EX_USAGE,
                     _("Failed to increment the pre-release qualifier of version {version}.")
                     .format(version=repr(version)),
                     _("The version scheme does not contain qualifiers")
                     )
        return result

    if version_info.prerelease:
        # The qualifier is the first dotted element of the pre-release string.
        prerelease_version_elements = version_info.prerelease.split(".")
        qualifier = prerelease_version_elements[0]
        qualifier_index = version_config.qualifiers.index(qualifier) if qualifier in version_config.qualifiers else -1
        if qualifier_index < 0:
            result.error(os.EX_DATAERR,
                         _("Failed to increment the pre-release qualifier of version {version}.")
                         .format(version=repr(version)),
                         _("The current qualifier is invalid: {qualifier}")
                         .format(qualifier=repr(qualifier)))
        else:
            # Step to the next qualifier in precedence order, if one exists.
            qualifier_index += 1
            if qualifier_index < len(version_config.qualifiers):
                new_qualifier = version_config.qualifiers[qualifier_index]
            else:
                result.error(os.EX_DATAERR,
                             _("Failed to increment the pre-release qualifier {qualifier} of version {version}.")
                             .format(qualifier=qualifier, version=repr(version)),
                             _("There are no further qualifiers with higher precedence, configured qualifiers are:\n"
                               "{listing}\n"
                               "The sub command 'bump-to-release' may be used for a final bump.")
                             .format(
                                 listing='\n'.join(' - ' + repr(qualifier) for qualifier in version_config.qualifiers))
                             )
    else:
        # Release versions have no qualifier to advance.
        result.error(os.EX_DATAERR,
                     _("Failed to increment the pre-release qualifier of version {version}.")
                     .format(version=version),
                     _("Pre-release increments cannot be performed on release versions."))

    if not result.has_errors() and new_qualifier is not None:
        # New version: same numeric components, next qualifier at step 1.
        result.value = semver.format_version(
            version_info.major,
            version_info.minor,
            version_info.patch,
            new_qualifier + ".1",
            None)

    return result
def version_bump_prerelease(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Increment the numeric step of the pre-release part (e.g. beta -> beta.1).

    Snapshot versions and release versions are rejected with an error, and
    any candidate value that does not sort strictly above *version* is
    discarded.
    """
    result = Result()

    version_info = semver.parse_version_info(version) if version is not None else semver.parse_version_info("0.0.0")

    if version_info.prerelease:
        prerelease_version_elements = version_info.prerelease.split(".")
        if len(prerelease_version_elements) > 0 and prerelease_version_elements[0].upper() == "SNAPSHOT":
            # Snapshot versions are never incremented (Maven compatibility).
            if len(prerelease_version_elements) == 1:
                result.error(os.EX_DATAERR,
                             _("The pre-release increment has been skipped."),
                             _("In order to retain Maven compatibility, "
                               "the pre-release component of snapshot versions must not be versioned."))
            else:
                result.error(os.EX_DATAERR,
                             _("Failed to increment the pre-release component of version {version}.")
                             .format(version=repr(version)),
                             _("Snapshot versions must not have a pre-release version."))
            result.value = version
        elif len(prerelease_version_elements) == 1:
            # Bare qualifier without a step number, e.g. '1.0.0-beta'.
            if version_config.versioning_scheme != VersioningScheme.SEMVER_WITH_SEQ:
                result.error(os.EX_DATAERR,
                             _("Failed to increment the pre-release component of version {version}.")
                             .format(version=repr(version)),
                             _("The qualifier {qualifier} must already be versioned.")
                             .format(qualifier=repr(prerelease_version_elements[0]))
                             )
            result.value = semver.bump_prerelease(version)
        # NOTE(review): a multi-element, non-snapshot pre-release (e.g.
        # 'beta.1') takes none of the branches above, leaving result.value
        # None and triggering the generic failure below -- confirm this
        # nesting against the upstream source.
    else:
        result.error(os.EX_DATAERR,
                     _("Failed to increment the pre-release component of version {version}.")
                     .format(version=repr(version)),
                     _("Pre-release increments cannot be performed on release versions.")
                     )

    if result.has_errors():
        # An error invalidates any value computed above.
        result.value = None
    elif result.value is not None and not semver.compare(result.value, version) > 0:
        # Sanity check: the new version must sort strictly higher.
        result.value = None

    if not result.value:
        result.error(os.EX_SOFTWARE,
                     _("Failed to increment the pre-release of version {version} for unknown reasons.")
                     .format(version=repr(version)),
                     None)

    return result
def version_bump_to_release(version_config: VersionConfig, version: Optional[str], global_seq: Optional[int]):
    """Turn a pre-release version into its release by dropping the pre-release part."""
    result = Result()
    version_info = semver.parse_version_info(version if version is not None else "0.0.0")

    if version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ:
        # Sequence-numbered schemes never produce plain release versions.
        result.error(os.EX_USAGE,
                     _("Failed to increment version to release: {version}.")
                     .format(version=repr(version)),
                     _("Sequential versions cannot be release versions."))
        return result

    if not version_info.prerelease:
        result.error(os.EX_DATAERR,
                     _("Failed to increment version to release: {version}.")
                     .format(version=repr(version)),
                     _("Only pre-release versions can be incremented to a release version."))

    if not result.has_errors():
        result.value = semver.format_version(
            version_info.major,
            version_info.minor,
            version_info.patch,
            None,
            None)
    return result
class VersionSet(object):
    """Callable that replaces the current version with a fixed new version."""

    __new_version = None

    def __init__(self, new_version):
        self.__new_version = new_version

    def __call__(self, version_config: VersionConfig, old_version: Optional[str], global_seq: Optional[int]):
        """Validate the stored version against *version_config* and return it."""
        result = Result()
        validation = version.validate_version(version_config, self.__new_version)
        result.add_subresult(validation)
        result.value = self.__new_version
        return result
def get_sequence_number(version_config: VersionConfig, new_version_info: semver.VersionInfo):
    """Return the sequence number of *new_version_info*, or None.

    Only SEMVER_WITH_SEQ schemes carry a sequence number (stored in the
    pre-release field of the version).
    """
    if version_config.versioning_scheme != VersioningScheme.SEMVER_WITH_SEQ:
        return None
    return int(new_version_info.prerelease)
| mit |
floren/linux-kvm | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Index of the current value; also decides when to emit a space separator.
i = 0
while True:
    # The input is a packed list of little-endian 32-bit values.
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: terminate the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    # Emit "index=value" with the index in hex and the le32 value decoded.
    # NOTE(review): reading binary from sys.stdin is Python 2 behaviour;
    # under Python 3 this would need sys.stdin.buffer -- confirm target.
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
openstack/poppy | poppy/provider/mock/controllers.py | 2 | 1046 | # Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exports Sample CDN controllers.
Field Mappings:
In order to reduce the disk / memory space used,
fields name will be, most of the time, the first
letter of their long name. Fields mapping will be
updated and documented in each controller class.
"""
from poppy.provider.mock import certificates
from poppy.provider.mock import services
CertificateController = certificates.CertificateController
ServiceController = services.ServiceController
| apache-2.0 |
maxdeliso/elevatorSim | Lib/test/test_exception_variations.py | 182 | 4036 |
from test.support import run_unittest
import unittest
class ExceptionTestCase(unittest.TestCase):
    """Exercise the try/except/else/finally clause combinations.

    Each test sets boolean flags inside the clauses and then asserts
    exactly which clauses executed, both with and without an exception
    being raised. The exact statement shapes are the point of these
    tests and must not be "simplified".
    """

    def test_try_except_else_finally(self):
        # Raising: except and finally run, else does not.
        hit_except = False
        hit_else = False
        hit_finally = False

        try:
            raise Exception('nyaa!')
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True

        self.assertTrue(hit_except)
        self.assertTrue(hit_finally)
        self.assertFalse(hit_else)

    def test_try_except_else_finally_no_exception(self):
        # Not raising: else and finally run, except does not.
        hit_except = False
        hit_else = False
        hit_finally = False

        try:
            pass
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True

        self.assertFalse(hit_except)
        self.assertTrue(hit_finally)
        self.assertTrue(hit_else)

    def test_try_except_finally(self):
        # No else clause: except and finally both run on a raise.
        hit_except = False
        hit_finally = False

        try:
            raise Exception('yarr!')
        except:
            hit_except = True
        finally:
            hit_finally = True

        self.assertTrue(hit_except)
        self.assertTrue(hit_finally)

    def test_try_except_finally_no_exception(self):
        # No else clause, no raise: only finally runs.
        hit_except = False
        hit_finally = False

        try:
            pass
        except:
            hit_except = True
        finally:
            hit_finally = True

        self.assertFalse(hit_except)
        self.assertTrue(hit_finally)

    def test_try_except(self):
        # Minimal form: a raise reaches the bare except.
        hit_except = False

        try:
            raise Exception('ahoy!')
        except:
            hit_except = True

        self.assertTrue(hit_except)

    def test_try_except_no_exception(self):
        # Minimal form, no raise: except is skipped.
        hit_except = False

        try:
            pass
        except:
            hit_except = True

        self.assertFalse(hit_except)

    def test_try_except_else(self):
        # else is skipped when the body raises.
        hit_except = False
        hit_else = False

        try:
            raise Exception('foo!')
        except:
            hit_except = True
        else:
            hit_else = True

        self.assertFalse(hit_else)
        self.assertTrue(hit_except)

    def test_try_except_else_no_exception(self):
        # else runs when the body completes normally.
        hit_except = False
        hit_else = False

        try:
            pass
        except:
            hit_except = True
        else:
            hit_else = True

        self.assertFalse(hit_except)
        self.assertTrue(hit_else)

    def test_try_finally_no_exception(self):
        # try/finally with no except clause.
        hit_finally = False

        try:
            pass
        finally:
            hit_finally = True

        self.assertTrue(hit_finally)

    def test_nested(self):
        # An inner handler consumes the exception; both finallys still run.
        hit_finally = False
        hit_inner_except = False
        hit_inner_finally = False

        try:
            try:
                raise Exception('inner exception')
            except:
                hit_inner_except = True
            finally:
                hit_inner_finally = True
        finally:
            hit_finally = True

        self.assertTrue(hit_inner_except)
        self.assertTrue(hit_inner_finally)
        self.assertTrue(hit_finally)

    def test_nested_else(self):
        # The inner else runs, then the outer body raises: the outer except
        # (not the outer else) handles it, and the outer finally runs.
        hit_else = False
        hit_finally = False
        hit_except = False
        hit_inner_except = False
        hit_inner_else = False

        try:
            try:
                pass
            except:
                hit_inner_except = True
            else:
                hit_inner_else = True

            raise Exception('outer exception')
        except:
            hit_except = True
        else:
            hit_else = True
        finally:
            hit_finally = True

        self.assertFalse(hit_inner_except)
        self.assertTrue(hit_inner_else)
        self.assertFalse(hit_else)
        self.assertTrue(hit_finally)
        self.assertTrue(hit_except)
def test_main():
    """Entry point used by the regrtest driver to run this module's tests."""
    run_unittest(ExceptionTestCase)

if __name__ == '__main__':
    test_main()
| bsd-2-clause |
ashang/calibre | src/calibre/ebooks/oeb/writer.py | 24 | 2728 | '''
Directory output OEBBook writer.
'''
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import os
from calibre.ebooks.oeb.base import OPF_MIME, xml2str
from calibre.ebooks.oeb.base import DirContainer, OEBError
__all__ = ['OEBWriter']
class OEBWriter(object):
    """Serializes an OEBBook to a directory of files plus an OPF package."""

    DEFAULT_PROFILE = 'PRS505'
    """Default renderer profile for content written with this Writer."""

    TRANSFORMS = []
    """List of transforms to apply to content written with this Writer."""

    def __init__(self, version='2.0', page_map=False, pretty_print=False):
        # version: OPF version string ('1.2' or '2.0').
        # page_map: whether to emit an Adobe "page-map" file.
        self.version = version
        self.page_map = page_map
        self.pretty_print = pretty_print

    @classmethod
    def config(cls, cfg):
        """Add any book-writing options to the :class:`Config` object
        :param:`cfg`.
        """
        oeb = cfg.add_group('oeb', _('OPF/NCX/etc. generation options.'))
        versions = ['1.2', '2.0']
        oeb('opf_version', ['--opf-version'], default='2.0', choices=versions,
            help=_('OPF version to generate. Default is %default.'))
        oeb('adobe_page_map', ['--adobe-page-map'], default=False,
            help=_('Generate an Adobe "page-map" file if pagination '
                   'information is available.'))
        return cfg

    @classmethod
    def generate(cls, opts):
        """Generate a Writer instance from command-line options."""
        version = opts.opf_version
        page_map = opts.adobe_page_map
        pretty_print = opts.pretty_print
        return cls(version=version, page_map=page_map,
                   pretty_print=pretty_print)

    def __call__(self, oeb, path):
        """
        Write the book in the :class:`OEBBook` object :param:`oeb` to a folder
        at :param:`path`.
        """
        version = int(self.version[0])
        opfname = None
        if os.path.splitext(path)[1].lower() == '.opf':
            # A '.opf' path names the metadata file; the folder is its parent.
            opfname = os.path.basename(path)
            path = os.path.dirname(path)
        if not os.path.isdir(path):
            os.mkdir(path)
        output = DirContainer(path, oeb.log)
        for item in oeb.manifest.values():
            # Each manifest item is written under its href, relative to path.
            output.write(item.href, str(item))

        if version == 1:
            metadata = oeb.to_opf1()
        elif version == 2:
            metadata = oeb.to_opf2(page_map=self.page_map)
        else:
            raise OEBError("Unrecognized OPF version %r" % self.version)
        pretty_print = self.pretty_print
        for mime, (href, data) in metadata.items():
            # The main OPF document keeps the caller-supplied file name.
            if opfname and mime == OPF_MIME:
                href = opfname
            output.write(href, xml2str(data, pretty_print=pretty_print))
        return
| gpl-3.0 |
eriklavander/klaviaturregistret | web/urls.py | 1 | 1468 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView, DetailView, ListView, FormView, CreateView, UpdateView, DeleteView
from django.contrib.auth.views import logout
from django.contrib.auth.decorators import login_required
from web.models import Venue
from web.views import LandingPage, AdvancedSearchView, VenueList, VenueDetail, CreateVenue, DeleteVenue, CreateDescription, CreateContact, CreateImage
# Route table for the venue app. The `name` values are referenced by
# templates and reverse() calls elsewhere in the project.
urlpatterns = patterns('',
    # Venue browsing.
    url(r'^$', VenueList.as_view(), name='venue_list'),
    url(r'^venue/(?P<pk>\d+)/$', VenueDetail.as_view(), name='venue_detail'),
    # Venue editing -- all behind login_required.
    url(r'^venue/(?P<pk>\d+)/delete$', login_required(DeleteVenue.as_view()), name='delete_venue'),
    url(r'^venue/(?P<pk>\d+)/create_description$', login_required(CreateDescription.as_view()), name='create_description'),
    url(r'^venue/(?P<pk>\d+)/create_contact$', login_required(CreateContact.as_view()), name='create_contact'),
    url(r'^venue/(?P<pk>\d+)/create_image$', login_required(CreateImage.as_view()), name='create_image'),
    url(r'^create_venue$', login_required(CreateVenue.as_view()), name='create_venue'),
    url(r'^search$', AdvancedSearchView.as_view(), name='search'),
    # Auth-related pages.
    url(r'^accounts/login', LandingPage.as_view(), name='landingpage'),
    # NOTE(review): the next two patterns lack the leading '^' anchor used
    # everywhere else -- confirm whether that is intentional.
    url(r'accounts/profile', login_required(TemplateView.as_view(template_name="profile.html")), name='profile'),
    url(r'accounts/logout', logout, {'next_page': '/'}, name='logout'),
)
gregier/gedit-plugins | plugins/multiedit/multiedit/signals.py | 4 | 2702 | # -*- coding: utf-8 -*-
#
# signals.py
#
# Copyright (C) 2009 - Jesse van den Kieboom
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
class Signals(object):
    """Bookkeeping helper for signal handler ids.

    Handler ids are stored as {object: {signal name: [handler id, ...]}}
    so they can later be blocked, unblocked or disconnected in bulk.
    """

    def __init__(self):
        # Maps each connected object to {signal name: [handler ids]}.
        self._signals = {}

    def _connect(self, obj, name, handler, connector):
        """Register *handler* via *connector* and remember its handler id."""
        per_object = self._signals.setdefault(obj, {})
        hid = connector(name, handler)
        per_object.setdefault(name, []).append(hid)
        return hid

    def connect_signal(self, obj, name, handler):
        """Connect *handler* to signal *name* on *obj*, tracking the id."""
        return self._connect(obj, name, handler, obj.connect)

    def connect_signals(self, obj, handlers):
        """Connect every (name -> handler) pair in *handlers* on *obj*."""
        for name, handler in handlers.items():
            self.connect_signal(obj, name, handler)

    def connect_signal_after(self, obj, name, handler):
        """Like connect_signal, but runs after the default handler."""
        return self._connect(obj, name, handler, obj.connect_after)

    def disconnect_signals(self, obj):
        """Disconnect every tracked handler of *obj*; False if none tracked."""
        per_object = self._signals.get(obj)
        if per_object is None:
            return False
        for hids in per_object.values():
            for hid in hids:
                obj.disconnect(hid)
        del self._signals[obj]
        return True

    def block_signal(self, obj, name):
        """Block all tracked handlers of signal *name*; False if untracked."""
        try:
            hids = self._signals[obj][name]
        except KeyError:
            return False
        for hid in hids:
            obj.handler_block(hid)
        return True

    def unblock_signal(self, obj, name):
        """Unblock all tracked handlers of signal *name*; False if untracked."""
        try:
            hids = self._signals[obj][name]
        except KeyError:
            return False
        for hid in hids:
            obj.handler_unblock(hid)
        return True

    def disconnect_signal(self, obj, name):
        """Disconnect the tracked handlers of one signal; False if untracked."""
        try:
            hids = self._signals[obj][name]
        except KeyError:
            return False
        for hid in hids:
            obj.disconnect(hid)
        del self._signals[obj][name]
        # Drop the object entry entirely once its last signal is gone.
        if not self._signals[obj]:
            del self._signals[obj]
        return True
# ex:ts=4:et:
| gpl-2.0 |
3dfxmadscientist/CBSS | addons/account_check_writing/__init__.py | 446 | 1111 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account
import account_voucher
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hsuantien/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
    """The Model which hold the data. It implements the
    observable in the observer pattern and notifies the
    registered observers on change event.
    """

    def __init__(self):
        self.observers = []
        self.surface = None
        self.data = []
        # NOTE: Controller.fit() assigns `clf`; this `cls` attribute appears
        # unused beyond initialization.
        self.cls = None
        self.surface_type = 0

    def changed(self, event):
        """Notify the observers. """
        for observer in list(self.observers):
            observer.update(event, self)

    def add_observer(self, observer):
        """Register an observer. """
        self.observers.append(observer)

    def set_surface(self, surface):
        # Surface is the (X1, X2, Z) grid produced by the controller.
        self.surface = surface

    def dump_svmlight_file(self, file):
        """Write the collected examples to *file* in svmlight format."""
        examples = np.array(self.data)
        features = examples[:, 0:2]
        labels = examples[:, 2]
        dump_svmlight_file(features, labels, file)
class Controller(object):
    """Mediates between the GUI widgets and the Model: fits the SVM and
    pushes decision surfaces / data changes into the model.

    NOTE(review): `complexity`, `gamma`, `coef0` and `degree` are read in
    fit() but never assigned here -- presumably the ControllBar attaches
    them as Tk variables; confirm before reuse.
    """

    def __init__(self, model):
        self.model = model
        # Tk variables bound to the kernel / surface-type radio buttons.
        self.kernel = Tk.IntVar()
        self.surface_type = Tk.IntVar()
        # Whether or not a model has been fitted
        self.fitted = False

    def fit(self):
        """Fit an SVM on the current examples and update the decision surface."""
        print("fit the model")
        train = np.array(self.model.data)
        X = train[:, 0:2]
        y = train[:, 2]

        C = float(self.complexity.get())
        gamma = float(self.gamma.get())
        coef0 = float(self.coef0.get())
        degree = int(self.degree.get())
        # Maps the radio-button index to an sklearn kernel name.
        kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        # Fall back to a one-class SVM when all examples share one label.
        if len(np.unique(y)) == 1:
            clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
                                  gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X)
        else:
            clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
                          gamma=gamma, coef0=coef0, degree=degree)
            clf.fit(X, y)
        if hasattr(clf, 'score'):
            print("Accuracy:", clf.score(X, y) * 100)
        X1, X2, Z = self.decision_surface(clf)
        self.model.clf = clf
        self.model.set_surface((X1, X2, Z))
        self.model.surface_type = self.surface_type.get()
        self.fitted = True
        self.model.changed("surface")

    def decision_surface(self, cls):
        """Evaluate the decision function of *cls* on a regular grid
        spanning [x_min, x_max] x [y_min, y_max] with unit spacing."""
        delta = 1
        x = np.arange(x_min, x_max + delta, delta)
        y = np.arange(y_min, y_max + delta, delta)
        X1, X2 = np.meshgrid(x, y)
        Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
        Z = Z.reshape(X1.shape)
        return X1, X2, Z

    def clear_data(self):
        """Drop all examples and mark the model as unfitted."""
        self.model.data = []
        self.fitted = False
        self.model.changed("clear")

    def add_example(self, x, y, label):
        """Record one labelled point and refresh the surface if fitted."""
        self.model.data.append((x, y, label))
        self.model.changed("example_added")

        # update decision surface if already fitted.
        self.refit()

    def refit(self):
        """Refit the model if already fitted. """
        if self.fitted:
            self.fit()
class View(object):
    """Tk/matplotlib view: draws the training examples and decision
    surfaces, and forwards mouse clicks to the controller.  Acts as the
    observer in the observer pattern (see ``update``)."""

    def __init__(self, root, controller):
        # Embed a matplotlib figure (no ticks, fixed plot bounds) in the
        # Tk root window.
        f = Figure()
        ax = f.add_subplot(111)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_xlim((x_min, x_max))
        ax.set_ylim((y_min, y_max))
        # NOTE(review): FigureCanvasTkAgg is presumably imported at the top
        # of the module (not visible in this chunk) — confirm.
        canvas = FigureCanvasTkAgg(f, master=root)
        canvas.show()
        canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        # Route mouse presses on the canvas to onclick().
        canvas.mpl_connect('button_press_event', self.onclick)
        toolbar = NavigationToolbar2TkAgg(canvas, root)
        toolbar.update()
        self.controllbar = ControllBar(root, controller)
        self.f = f
        self.ax = ax
        self.canvas = canvas
        self.controller = controller
        self.contours = []     # artists belonging to the current surface
        self.c_labels = None
        self.plot_kernels()

    def plot_kernels(self):
        """Write the kernel formulas below the x axis."""
        # NOTE(review): non-raw strings; sequences such as \e and \g are
        # not recognized escapes and so pass through to mathtext intact,
        # but raw strings would be safer.
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")

    def onclick(self, event):
        # Left click (button 1) adds a positive example, right click
        # (button 3) a negative one; clicks outside the axes (no data
        # coordinates) are ignored.
        if event.xdata and event.ydata:
            if event.button == 1:
                self.controller.add_example(event.xdata, event.ydata, 1)
            elif event.button == 3:
                self.controller.add_example(event.xdata, event.ydata, -1)

    def update_example(self, model, idx):
        """Draw the example stored at model.data[idx] (white for +1,
        black for -1)."""
        x, y, l = model.data[idx]
        # NOTE(review): ``color`` would be unbound for labels other than
        # +1/-1; onclick() only ever produces those two values.
        if l == 1:
            color = 'w'
        elif l == -1:
            color = 'k'
        self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)

    def update(self, event, model):
        """Observer callback: redraw according to the model change event."""
        if event == "examples_loaded":
            for i in xrange(len(model.data)):
                self.update_example(model, i)

        if event == "example_added":
            # Only the newest example needs drawing.
            self.update_example(model, -1)

        if event == "clear":
            self.ax.clear()
            self.ax.set_xticks([])
            self.ax.set_yticks([])
            self.contours = []
            self.c_labels = None
            self.plot_kernels()

        if event == "surface":
            self.remove_surface()
            self.plot_support_vectors(model.clf.support_vectors_)
            self.plot_decision_surface(model.surface, model.surface_type)

        self.canvas.draw()

    def remove_surface(self):
        """Remove old decision surface."""
        if len(self.contours) > 0:
            for contour in self.contours:
                if isinstance(contour, ContourSet):
                    # A ContourSet bundles several line collections that
                    # must each be removed individually.
                    for lineset in contour.collections:
                        lineset.remove()
                else:
                    contour.remove()
            self.contours = []

    def plot_support_vectors(self, support_vectors):
        """Plot the support vectors by placing circles over the
        corresponding data points and adds the circle collection
        to the contours list."""
        cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
                             s=80, edgecolors="k", facecolors="none")
        self.contours.append(cs)

    def plot_decision_surface(self, surface, type):
        """Draw the decision surface: contour lines (type 0) or a filled
        colormap plus the zero level (type 1)."""
        # ``type`` shadows the builtin of the same name; kept for
        # interface compatibility.
        X1, X2, Z = surface
        if type == 0:
            levels = [-1.0, 0.0, 1.0]
            linestyles = ['dashed', 'solid', 'dashed']
            colors = 'k'
            self.contours.append(self.ax.contour(X1, X2, Z, levels,
                                                 colors=colors,
                                                 linestyles=linestyles))
        elif type == 1:
            # NOTE(review): assumes the module imported ``matplotlib``
            # itself near the top of the file (not visible here) — confirm.
            self.contours.append(self.ax.contourf(X1, X2, Z, 10,
                                                  cmap=matplotlib.cm.bone,
                                                  origin='lower', alpha=0.85))
            self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
                                                 linestyles=['solid']))
        else:
            raise ValueError("surface type unknown")
class ControllBar(object):
    """Bottom control bar: kernel selection, hyper-parameter entries,
    surface-type selection, and the Fit/Clear buttons."""

    def __init__(self, root, controller):
        frame = Tk.Frame(root)

        # Kernel radio buttons, bound to controller.kernel.
        kernel_group = Tk.Frame(frame)
        for text, value in (("Linear", 0), ("RBF", 1), ("Poly", 2)):
            Tk.Radiobutton(kernel_group, text=text,
                           variable=controller.kernel, value=value,
                           command=controller.refit).pack(anchor=Tk.W)
        kernel_group.pack(side=Tk.LEFT)

        # One labelled entry per hyper-parameter; the StringVar is stored
        # on the controller under the given attribute name.
        valbox = Tk.Frame(frame)
        for text, attr, default in (("C:", "complexity", "1.0"),
                                    ("gamma:", "gamma", "0.01"),
                                    ("degree:", "degree", "3"),
                                    ("coef0:", "coef0", "0")):
            var = Tk.StringVar()
            var.set(default)
            setattr(controller, attr, var)
            row = Tk.Frame(valbox)
            Tk.Label(row, text=text, anchor="e", width=7).pack(side=Tk.LEFT)
            Tk.Entry(row, width=6, textvariable=var).pack(side=Tk.LEFT)
            row.pack()
        valbox.pack(side=Tk.LEFT)

        # Surface-type radio buttons, bound to controller.surface_type.
        cmap_group = Tk.Frame(frame)
        for text, value in (("Hyperplanes", 0), ("Surface", 1)):
            Tk.Radiobutton(cmap_group, text=text,
                           variable=controller.surface_type, value=value,
                           command=controller.refit).pack(anchor=Tk.W)
        cmap_group.pack(side=Tk.LEFT)

        train_button = Tk.Button(frame, text='Fit', width=5,
                                 command=controller.fit)
        train_button.pack()
        frame.pack(side=Tk.LEFT)
        Tk.Button(frame, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
    """Build the command-line option parser for this script.

    Recognizes a single ``--output`` option naming the file the collected
    examples are dumped to on exit.
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--output", action="store", type="str",
                      dest="output", help="Path where to dump data.")
    return parser
def main(argv):
    """Launch the GUI; on exit, optionally dump the collected examples.

    ``argv`` is the full argument vector (argv[0] is stripped here).
    """
    options, _ = get_parser().parse_args(argv[1:])

    root = Tk.Tk()
    model = Model()
    controller = Controller(model)
    root.wm_title("Scikit-learn Libsvm GUI")
    model.add_observer(View(root, controller))

    Tk.mainloop()  # blocks until the window is closed

    if options.output:
        model.dump_svmlight_file(options.output)
# Script entry point: pass the full argv so main() can strip argv[0].
if __name__ == "__main__":
    main(sys.argv)
| bsd-3-clause |
harmy/kbengine | kbe/res/scripts/common/Lib/distutils/tests/test_build_clib.py | 3 | 5111 | """Tests for distutils.command.build_clib."""
import unittest
import os
import sys
from test.support import run_unittest
from distutils.command.build_clib import build_clib
from distutils.errors import DistutilsSetupError
from distutils.tests import support
from distutils.spawn import find_executable
class BuildCLibTestCase(support.TempdirManager,
                        support.LoggingSilencer,
                        unittest.TestCase):
    """Tests for distutils' build_clib command."""

    def test_check_library_dist(self):
        """check_library_list() must reject malformed 'libraries' values."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # 'libraries' option must be a list
        self.assertRaises(DistutilsSetupError, cmd.check_library_list, 'foo')

        # each element of 'libraries' must be a 2-tuple
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          ['foo1', 'foo2'])

        # first element of each tuple in 'libraries'
        # must be a string (the library name)
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [(1, 'foo1'), ('name', 'foo2')])

        # library name may not contain directory separators
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [('name', 'foo1'),
                           ('another/name', 'foo2')])

        # second element of each tuple must be a dictionary (build info)
        self.assertRaises(DistutilsSetupError, cmd.check_library_list,
                          [('name', {}),
                           ('another', 'foo2')])

        # those work
        libs = [('name', {}), ('name', {'ok': 'good'})]
        cmd.check_library_list(libs)

    def test_get_source_files(self):
        """get_source_files() validates and flattens per-library sources."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # in 'libraries' option, 'sources' must be present and must be
        # a list of source filenames
        cmd.libraries = [('name', {})]
        self.assertRaises(DistutilsSetupError, cmd.get_source_files)

        cmd.libraries = [('name', {'sources': 1})]
        self.assertRaises(DistutilsSetupError, cmd.get_source_files)

        cmd.libraries = [('name', {'sources': ['a', 'b']})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b'])

        cmd.libraries = [('name', {'sources': ('a', 'b')})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b'])

        # sources from multiple libraries are concatenated in order
        cmd.libraries = [('name', {'sources': ('a', 'b')}),
                         ('name2', {'sources': ['c', 'd']})]
        self.assertEqual(cmd.get_source_files(), ['a', 'b', 'c', 'd'])

    def test_build_libraries(self):
        """build_libraries() type-checks 'sources' before compiling."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # Stub compiler so no real compilation happens.
        class FakeCompiler:
            def compile(*args, **kw):
                pass
            create_static_lib = compile

        cmd.compiler = FakeCompiler()

        # build_libraries is also doing a bit of type checking
        lib = [('name', {'sources': 'notvalid'})]
        self.assertRaises(DistutilsSetupError, cmd.build_libraries, lib)

        # empty source lists/tuples are accepted
        lib = [('name', {'sources': list()})]
        cmd.build_libraries(lib)

        lib = [('name', {'sources': tuple()})]
        cmd.build_libraries(lib)

    def test_finalize_options(self):
        """finalize_options() normalizes include_dirs and checks libraries."""
        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        # a bare string include_dirs is wrapped in a list
        cmd.include_dirs = 'one-dir'
        cmd.finalize_options()
        self.assertEqual(cmd.include_dirs, ['one-dir'])

        cmd.include_dirs = None
        cmd.finalize_options()
        self.assertEqual(cmd.include_dirs, [])

        cmd.distribution.libraries = 'WONTWORK'
        self.assertRaises(DistutilsSetupError, cmd.finalize_options)

    def test_run(self):
        """End-to-end: compile a one-file static library."""
        # can't test on windows
        if sys.platform == 'win32':
            return

        pkg_dir, dist = self.create_dist()
        cmd = build_clib(dist)

        foo_c = os.path.join(pkg_dir, 'foo.c')
        self.write_file(foo_c, 'int main(void) { return 1;}\n')
        cmd.libraries = [('foo', {'sources': [foo_c]})]

        build_temp = os.path.join(pkg_dir, 'build')
        os.mkdir(build_temp)
        cmd.build_temp = build_temp
        cmd.build_clib = build_temp

        # before we run the command, we want to make sure
        # all commands are present on the system
        # by creating a compiler and checking its executables
        from distutils.ccompiler import new_compiler
        from distutils.sysconfig import customize_compiler

        compiler = new_compiler()
        customize_compiler(compiler)
        for ccmd in compiler.executables.values():
            if ccmd is None:
                continue
            if find_executable(ccmd[0]) is None:
                return  # can't test

        # this should work
        cmd.run()

        # let's check the result
        self.assertTrue('libfoo.a' in os.listdir(build_temp))
def test_suite():
    """Return this module's tests as a unittest suite.

    Uses the TestLoader API instead of ``unittest.makeSuite``, which is
    deprecated (and removed in Python 3.13); both build the same suite.
    """
    return unittest.TestLoader().loadTestsFromTestCase(BuildCLibTestCase)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_unittest(test_suite())
| lgpl-3.0 |
solarpermit/solarpermit | website/migrations/0100_unicode_migration.py | 1 | 62291 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Convert every website_* table to the utf8mb4 character set (full
    # 4-byte Unicode, e.g. emoji) with the utf8mb4_unicode_ci collation,
    # then OPTIMIZE each table to rebuild it and reclaim space.  Raw SQL
    # because South's ORM layer cannot express charset conversions.
    db.execute("ALTER TABLE website_action CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_actioncategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_address CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_answerattachment CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_answerchoice CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_answerchoicegroup CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_answerreference CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_applicability CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_article CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_comment CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_entityview CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_entityviewcount CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_event CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_jurisdiction CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_jurisdictioncontributor CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_jurisdictionrating CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_migrationhistory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_organization CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_organizationaddress CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_organizationcategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_organizationmember CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_organizationrating CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_pressrelease CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_question CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_questioncategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_ratingcategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_ratinglevel CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_reaction CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_reactioncategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_rewardcategory CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_roletype CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_servervariable CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_template CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_templatequestion CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_usercommentview CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_userdetail CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_userfavorite CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_userpageview CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_userrating CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_usersearch CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_view CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_vieworgs CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_viewquestions CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("ALTER TABLE website_zipcode CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci")
    db.execute("OPTIMIZE TABLE website_action")
    db.execute("OPTIMIZE TABLE website_actioncategory")
    db.execute("OPTIMIZE TABLE website_address")
    db.execute("OPTIMIZE TABLE website_answerattachment")
    db.execute("OPTIMIZE TABLE website_answerchoice")
    db.execute("OPTIMIZE TABLE website_answerchoicegroup")
    db.execute("OPTIMIZE TABLE website_answerreference")
    db.execute("OPTIMIZE TABLE website_applicability")
    db.execute("OPTIMIZE TABLE website_article")
    db.execute("OPTIMIZE TABLE website_comment")
    db.execute("OPTIMIZE TABLE website_entityview")
    db.execute("OPTIMIZE TABLE website_entityviewcount")
    db.execute("OPTIMIZE TABLE website_event")
    db.execute("OPTIMIZE TABLE website_jurisdiction")
    db.execute("OPTIMIZE TABLE website_jurisdictioncontributor")
    db.execute("OPTIMIZE TABLE website_jurisdictionrating")
    db.execute("OPTIMIZE TABLE website_migrationhistory")
    db.execute("OPTIMIZE TABLE website_organization")
    db.execute("OPTIMIZE TABLE website_organizationaddress")
    db.execute("OPTIMIZE TABLE website_organizationcategory")
    db.execute("OPTIMIZE TABLE website_organizationmember")
    db.execute("OPTIMIZE TABLE website_organizationrating")
    db.execute("OPTIMIZE TABLE website_pressrelease")
    db.execute("OPTIMIZE TABLE website_question")
    db.execute("OPTIMIZE TABLE website_questioncategory")
    db.execute("OPTIMIZE TABLE website_ratingcategory")
    db.execute("OPTIMIZE TABLE website_ratinglevel")
    db.execute("OPTIMIZE TABLE website_reaction")
    db.execute("OPTIMIZE TABLE website_reactioncategory")
    db.execute("OPTIMIZE TABLE website_rewardcategory")
    db.execute("OPTIMIZE TABLE website_roletype")
    db.execute("OPTIMIZE TABLE website_servervariable")
    db.execute("OPTIMIZE TABLE website_template")
    db.execute("OPTIMIZE TABLE website_templatequestion")
    db.execute("OPTIMIZE TABLE website_usercommentview")
    db.execute("OPTIMIZE TABLE website_userdetail")
    db.execute("OPTIMIZE TABLE website_userfavorite")
    db.execute("OPTIMIZE TABLE website_userpageview")
    db.execute("OPTIMIZE TABLE website_userrating")
    db.execute("OPTIMIZE TABLE website_usersearch")
    db.execute("OPTIMIZE TABLE website_view")
    db.execute("OPTIMIZE TABLE website_vieworgs")
    db.execute("OPTIMIZE TABLE website_viewquestions")
    db.execute("OPTIMIZE TABLE website_zipcode")
def backwards(self, orm):
    """Irreversible: the original charset/collation of each table is not
    recorded, so the utf8mb4 conversion cannot be undone.

    Raises RuntimeError unconditionally.  (The original had an
    unreachable ``pass`` after the raise; removed as dead code.)
    """
    raise RuntimeError("Cannot reverse this migration.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerattachment': {
'Meta': {'object_name': 'AnswerAttachment'},
'answer_reference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerReference']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.article': {
'Meta': {'object_name': 'Article'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'publisher': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.event': {
'Meta': {'object_name': 'Event'},
'end': ('django.db.models.fields.DateTimeField', [], {}),
'expiration': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']"}),
'published': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'name_for_url': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.jurisdictionrating': {
'Meta': {'object_name': 'JurisdictionRating'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': u"orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'support_attachments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website']
| bsd-3-clause |
"""
Online learning of a dictionary of parts of faces
==================================================

This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.

From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.

The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)

import time

import matplotlib.pyplot as plt
import numpy as np

from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d

faces = datasets.fetch_olivetti_faces()

###############################################################################
# Learn the dictionary of images

print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)

buffer = []
t0 = time.time()

# The online learning part: cycle over the whole dataset 6 times.
# `index` counts images processed across all passes.
index = 0
for _ in range(6):
    for img in faces.images:
        data = extract_patches_2d(img, patch_size, max_patches=50,
                                  random_state=rng)
        data = np.reshape(data, (len(data), -1))
        buffer.append(data)
        index += 1
        # Every 10 images (~500 patches), standardize the accumulated
        # buffer and feed it to the online estimator.
        if index % 10 == 0:
            data = np.concatenate(buffer, axis=0)
            data -= np.mean(data, axis=0)
            data /= np.std(data, axis=0)
            kmeans.partial_fit(data)
            buffer = []
        if index % 100 == 0:
            print('Partial fit of %4i out of %i'
                  % (index, 6 * len(faces.images)))

dt = time.time() - t0
print('done in %.2fs.' % dt)

###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
    plt.subplot(9, 9, i + 1)
    plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

# Fixed: the loop above makes 6 passes over the images (not 8), and the
# count shown is the number of images fed to partial_fit, matching the
# progress message printed during training.
plt.suptitle('Patches of faces\nTrain time %.1fs on %d images' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
from __future__ import absolute_import

# Config
from . import config
from .config import is_training, get_training_mode, init_graph

# Import models
from . import models
from .models.dnn import DNN
from .models.generator import SequenceGenerator

# Helpers
from . import helpers
from .helpers.evaluator import Evaluator
from .helpers.trainer import Trainer, TrainOp
from .helpers.regularizer import add_weights_regularizer
from .helpers.summarizer import summarize, summarize_activations, \
    summarize_gradients, summarize_variables, summarize_all

# Predefined ops
# (Fixed: `from . import optimizers` used to appear twice in this group.)
from . import metrics
from . import activations
from . import losses
from . import initializations
from . import optimizers
from . import summaries
from . import variables
from . import collections  # Add TFLearn collections to Tensorflow GraphKeys

# Direct ops inclusion
from .optimizers import SGD, AdaGrad, Adam, RMSProp, Momentum, Ftrl, AdaDelta
from .activations import linear, tanh, sigmoid, softmax, softplus, softsign,\
    relu, relu6, leaky_relu, prelu, elu
from .variables import variable, get_all_trainable_variable, \
    get_all_variables, get_layer_variables_by_name
from .objectives import categorical_crossentropy, binary_crossentropy, \
    softmax_categorical_crossentropy, hinge_loss, mean_square
from .metrics import Top_k, Accuracy, R2, top_k_op, accuracy_op, r2_op

# Direct layers inclusion
from . import layers
from .layers.conv import conv_2d, max_pool_2d, avg_pool_2d, conv_1d, \
    highway_conv_2d, highway_conv_1d, max_pool_1d, avg_pool_1d, \
    global_avg_pool, residual_block, residual_bottleneck, \
    conv_2d_transpose, upsample_2d
from .layers.core import input_data, dropout, custom_layer, reshape, \
    flatten, activation, fully_connected, single_unit, highway, \
    one_hot_encoding, time_distributed
from .layers.normalization import batch_normalization, local_response_normalization
from .layers.estimator import regression
from .layers.recurrent import lstm, gru, simple_rnn, bidirectional_rnn, \
    BasicRNNCell, BasicLSTMCell, GRUCell
from .layers.embedding_ops import embedding
from .layers.merge_ops import merge, merge_outputs

# Datasets
from . import datasets

# Utils
from . import data_utils
from . import utils
from .utils import get_layer_by_name

# Data Utils
from .data_augmentation import DataAugmentation, ImageAugmentation, SequenceAugmentation
from .data_preprocessing import DataPreprocessing, ImagePreprocessing, SequencePreprocessing

# Init training mode
config.init_training_mode()
| agpl-3.0 |
ChrisEby/M101P-MongoDbForDevelopers | Week 4/Homework 4.3/sessionDAO.py | 34 | 2310 | __author__ = 'aje'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import random
import string
# The session Data Access Object handles interactions with the sessions collection
class SessionDAO:
def __init__(self, database):
self.db = database
self.sessions = database.sessions
# will start a new session id by adding a new document to the sessions collection
# returns the sessionID or None
def start_session(self, username):
session_id = self.get_random_str(32)
session = {'username': username, '_id': session_id}
try:
self.sessions.insert(session, safe=True)
except:
print "Unexpected error on start_session:", sys.exc_info()[0]
return None
return str(session['_id'])
# will send a new user session by deleting from sessions table
def end_session(self, session_id):
if session_id is None:
return
self.sessions.remove({'_id': session_id})
return
# if there is a valid session, it is returned
def get_session(self, session_id):
if session_id is None:
return None
session = self.sessions.find_one({'_id': session_id})
return session
# get the username of the current session, or None if the session is not valid
def get_username(self, session_id):
session = self.get_session(session_id)
if session is None:
return None
else:
return session['username']
def get_random_str(self, num_chars):
random_string = ""
for i in range(num_chars):
random_string = random_string + random.choice(string.ascii_letters)
return random_string
| mit |
ric2b/Vivaldi-browser | chromium/third_party/blink/renderer/bindings/scripts/web_idl/argument.py | 1 | 1833 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from .composition_parts import WithIdentifier
from .composition_parts import WithOwner
from .idl_type import IdlType
from .literal_constant import LiteralConstant
from .make_copy import make_copy
class Argument(WithIdentifier, WithOwner):
    """A single argument of an IDL operation, constructor, or callback."""

    class IR(WithIdentifier):
        """Intermediate representation of an argument, built during parsing."""

        def __init__(self, identifier, index, idl_type, default_value=None):
            assert isinstance(index, int)
            assert isinstance(idl_type, IdlType)
            assert (default_value is None
                    or isinstance(default_value, LiteralConstant))
            WithIdentifier.__init__(self, identifier)
            self.index = index
            self.idl_type = idl_type
            self.default_value = default_value

    def __init__(self, ir, owner):
        assert isinstance(ir, Argument.IR)

        # Work on a private copy so the shared IR object stays untouched.
        copied_ir = make_copy(ir)
        WithIdentifier.__init__(self, copied_ir.identifier)
        WithOwner.__init__(self, owner)
        self._index = copied_ir.index
        self._idl_type = copied_ir.idl_type
        self._default_value = copied_ir.default_value

    @property
    def index(self):
        """Position of this argument within the argument list."""
        return self._index

    @property
    def idl_type(self):
        """The IdlType declared for this argument."""
        return self._idl_type

    @property
    def is_optional(self):
        """True if the argument's type is marked optional."""
        return self.idl_type.is_optional

    @property
    def is_variadic(self):
        """True if the argument's type is marked variadic."""
        return self.idl_type.is_variadic

    @property
    def default_value(self):
        """The default value as a LiteralConstant, or None."""
        return self._default_value
| bsd-3-clause |
axelkennedal/dissen | dissen/signup/models/company.py | 1 | 2609 | from django.db import models
from django.core.validators import MinValueValidator
class Company(models.Model):
    """A company/organization participating or interested in D-Dagen.

    Fields up to ``billing_address`` are filled in by the company itself;
    ``comment`` and ``priority`` are for internal use by the project team.
    """

    def __str__(self):
        return self.name + ": " + self.area_of_business

    # filled in by company
    AREA_OF_BUSINESS_CHOICES = (
        ("finance", "Finance"),
        ("appdev", "App Development"),
        ("consulting", "Consulting"),
        ("entertainment", "Entertainment"),
        ("comm", "Communication"),
        ("it", "IT Services"),
        ("prodev", "Product Development"),
        ("info", "Information"),
        ("edu", "Education"),
        ("data", "Data & Search"),
        ("fash", "Fashion"),
        ("sec", "Security"),
        ("man", "Management"),
    )

    name = models.CharField(max_length=50)
    description = models.CharField(max_length=1100)
    logotype = models.FileField()
    # NOTE(review): the default "Pick one" is not among the declared choices,
    # so full_clean() on an untouched instance will fail -- confirm intent.
    area_of_business = models.CharField(choices=AREA_OF_BUSINESS_CHOICES, default="Pick one", max_length=100)
    employees = models.IntegerField(validators=[MinValueValidator(1)])
    first_time_at_fair = models.BooleanField(default=True)
    billing_address = models.CharField(max_length=200)

    # for internal use only
    PRIORITY_CHOICES = (
        ("low", "LOW"),
        ("mid", "MID"),
        ("high", "HIGH")
    )
    comment = models.CharField(max_length=500)
    # Fixed: the default used to be PRIORITY_CHOICES[0], i.e. the whole
    # ("low", "LOW") tuple. A choices default must be the stored value.
    priority = models.CharField(choices=PRIORITY_CHOICES, default="low", max_length=10)

    def calculateTotalPrice(self):
        """Sum the prices of the packages this company selected for this
        instance of D-Dagen. Assumes there is only one instance of each
        package. Package lookup is not implemented yet, so this currently
        returns 0."""
        totalPrice = 0
        # todo
        return totalPrice
class CompanyStatus(models.Model):
    """A company can be in a specific status depending on how far they've come
    in the application process. A new instance of this class should be created
    each time the company changes status, so we can track change over time
    (using the TIMESTAMPs). Statuses are managed by the project team or
    automatically by the system."""

    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    # Set once on creation; never updated, so it records when the status change happened.
    TIMESTAMP = models.DateTimeField(auto_now_add = True)
    STATUS_CHOICES = (
        ("notcontacted", "Not Contacted"),
        ("contacted", "Contacted - Waiting to sign up"),
        ("signedup", "Signed Up - Waiting for contract to be signed"),
        ("contractsigned", "Contract Signed"),
        ("dec", "Declined")
    )
    # Fixed: the default used to be STATUS_CHOICES[0], i.e. the whole
    # ("notcontacted", "Not Contacted") tuple. A choices default must be
    # the stored value.
    STATUS = models.CharField(choices=STATUS_CHOICES, default="notcontacted", max_length=100)
| mit |
rogerscristo/BotFWD | env/lib/python3.6/site-packages/pip/_vendor/html5lib/filters/whitespace.py | 353 | 1139 | from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
    """Token filter that collapses runs of whitespace in character data,
    except inside whitespace-preserving elements (pre, textarea, and the
    RCDATA elements)."""

    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))

    def __iter__(self):
        # Depth of nesting inside whitespace-preserving elements; 0 means
        # whitespace may be collapsed.
        depth = 0
        for token in base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type == "StartTag" \
                    and (depth or token["name"] in self.spacePreserveElements):
                depth += 1
            elif token_type == "EndTag" and depth:
                depth -= 1
            elif not depth and token_type == "SpaceCharacters" and token["data"]:
                # Test on token["data"] above to not introduce spaces where there were not
                token["data"] = " "
            elif not depth and token_type == "Characters":
                token["data"] = collapse_spaces(token["data"])
            yield token
def collapse_spaces(text):
    """Replace each run of whitespace characters in *text* with one space."""
    return SPACES_REGEX.sub(' ', text)
| mit |
GdZ/scriptfile | software/googleAppEngine/lib/django_1_4/django/contrib/localflavor/il/forms.py | 317 | 2192 | """
Israeli-specific form helpers
"""
import re
from django.core.exceptions import ValidationError
from django.core.validators import EMPTY_VALUES
from django.forms.fields import RegexField, Field, EMPTY_VALUES
from django.utils.checksums import luhn
from django.utils.translation import ugettext_lazy as _
# Israeli ID numbers consist of up to 8 digits followed by a checksum digit.
# Numbers which are shorter than 8 digits are effectively left-zero-padded.
# The checksum digit is occasionally separated from the number by a hyphen,
# and is calculated using the luhn algorithm.
#
# Relevant references:
#
# (hebrew) http://he.wikipedia.org/wiki/%D7%9E%D7%A1%D7%A4%D7%A8_%D7%96%D7%94%D7%95%D7%AA_(%D7%99%D7%A9%D7%A8%D7%90%D7%9C)
# (hebrew) http://he.wikipedia.org/wiki/%D7%A1%D7%A4%D7%A8%D7%AA_%D7%91%D7%99%D7%A7%D7%95%D7%A8%D7%AA
id_number_re = re.compile(r'^(?P<number>\d{1,8})-?(?P<check>\d)$')
class ILPostalCodeField(RegexField):
    """
    A form field that validates its input as an Israeli postal code.
    Valid form is XXXXX where X represents integer.
    """

    default_error_messages = {
        'invalid': _(u'Enter a postal code in the format XXXXX'),
    }

    def __init__(self, *args, **kwargs):
        # Exactly five digits; RegexField performs the pattern validation.
        super(ILPostalCodeField, self).__init__(r'^\d{5}$', *args, **kwargs)

    def clean(self, value):
        # Strip embedded spaces (e.g. "12 345") before pattern validation.
        if value not in EMPTY_VALUES:
            value = value.replace(" ", "")
        return super(ILPostalCodeField, self).clean(value)
class ILIDNumberField(Field):
    """
    A form field that validates its input as an Israeli identification number.
    Valid form is per the Israeli ID specification: up to 8 digits, an
    optional hyphen, then a Luhn check digit (see id_number_re above).
    """

    default_error_messages = {
        'invalid': _(u'Enter a valid ID number.'),
    }

    def clean(self, value):
        value = super(ILIDNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Split "NNNNNNNN-C" (hyphen optional) into number and check digit.
        match = id_number_re.match(value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        # Re-join without the hyphen and verify the Luhn checksum over the
        # whole digit string.
        value = match.group('number') + match.group('check')
        if not luhn(value):
            raise ValidationError(self.error_messages['invalid'])
        return value
| mit |
taoger/titanium_mobile | node_modules/ioslib/node_modules/node-ios-device/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape '$ ', ' ' and ':' in *word* so it is safe as a Ninja path.

    The replacements are applied in this exact order, matching Ninja's
    escaping rules for paths.
    """
    for plain, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(plain, escaped)
    return word
class Writer(object):
    """Writes well-formed .ninja syntax to an output stream.

    Lines longer than *width* characters are wrapped with ' $\\n'
    continuations (see _line), taking care never to break at an
    escaped space.
    """

    def __init__(self, output, width=78):
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit *text* as '# '-prefixed comment lines, word-wrapped to width."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit a 'key = value' binding; a list value is space-joined.

        A None value emits nothing, which lets callers pass optional
        variables through unconditionally.
        """
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        """Emit a pool declaration with the given depth."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Emit a rule declaration followed by its optional variables."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a build statement and return the (listified) outputs.

        Implicit dependencies are separated from explicit ones with '|',
        order-only dependencies with '||'. *variables* may be a dict or a
        sequence of (key, value) pairs.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))
        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)
        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))
        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)
        return outputs

    def include(self, path):
        """Emit an include statement (inlines *path* into the current scope)."""
        self._line('include %s' % path)

    def subninja(self, path):
        """Emit a subninja statement (evaluates *path* in a new scope)."""
        self._line('subninja %s' % path)

    def default(self, paths):
        """Emit a default-targets statement."""
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = ' ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space (an even number of '$' precedes it).
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = ' ' * (indent+2)
        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        # NOTE: the parameter name shadows the builtin `input`; kept as-is
        # to preserve the upstream file verbatim.
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's only metacharacter; doubling it escapes it.
    return '$$'.join(string.split('$'))
| apache-2.0 |
eclipselu/ppurl-alfred-workflow | src/bs4/builder/_lxml.py | 446 | 8661 | __all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """TreeBuilder that drives lxml's event-based XML parser.

    Instances act as the lxml parser *target*: lxml calls start()/end()/
    data()/etc. on this object, which forwards the events to the soup.
    (This is Python 2 code, as the `unicode` checks and `except ..., e`
    syntax below show.)
    """

    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    # Markup is fed to lxml in pieces of this many bytes/characters.
    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    def default_parser(self, encoding):
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        # Use the default parser.
        parser = self.default_parser(encoding)
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        # Stack of inverted namespace maps ({url: prefix}); one entry is
        # pushed per open tag once any non-default namespace is in play.
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        """
        if isinstance(markup, unicode):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, unicode):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(markup, try_encodings, is_html)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

    def feed(self, markup):
        # Wrap the markup in a file-like object so it can be read in chunks.
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, unicode):
            markup = StringIO(markup)

        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))

    def close(self):
        # Reset the namespace stack for the next document.
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # lxml parser-target callback: an element has started.
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in nsmap.items())
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in nsmap.items():
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn then into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in attrs.items():
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        # lxml parser-target callback: an element has ended.
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        # lxml parser-target callback: character data.
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        # lxml parser-target callback: a doctype declaration.
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """TreeBuilder that drives lxml's HTML parser (etree.HTMLParser)."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Returns the parser class (not an instance); parser_for() in the
        # XML base class instantiates it with default arguments.
        return etree.HTMLParser

    def feed(self, markup):
        # Unlike the XML builder, HTML markup is fed in a single call
        # rather than in CHUNK_SIZE pieces.
        encoding = self.soup.original_encoding
        try:
            self.parser = self.parser_for(encoding)
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError), e:
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return u'<html><body>%s</body></html>' % fragment
| mit |
shsingh/ansible | lib/ansible/modules/remote_management/cobbler/cobbler_sync.py | 80 | 4403 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cobbler_sync
version_added: '2.7'
short_description: Sync Cobbler
description:
- Sync Cobbler to commit changes.
options:
host:
description:
- The name or IP address of the Cobbler system.
default: 127.0.0.1
port:
description:
- Port number to be used for REST connection.
- The default value depends on parameter C(use_ssl).
username:
description:
- The username to log in to Cobbler.
default: cobbler
password:
description:
- The password to log in to Cobbler.
required: yes
use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection.
type: bool
default: 'yes'
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only set to C(no) when used on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
author:
- Dag Wieers (@dagwieers)
todo:
notes:
- Concurrently syncing Cobbler is bound to fail with weird errors.
- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation.
More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
'''
EXAMPLES = r'''
- name: Commit Cobbler changes
cobbler_sync:
host: cobbler01
username: cobbler
password: MySuperSecureP4sswOrd
run_once: yes
delegate_to: localhost
'''
RETURN = r'''
# Default return values
'''
import datetime
import ssl
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
from ansible.module_utils._text import to_text
def main():
    """Ansible module entry point: log in to Cobbler's XML-RPC API and sync.

    Honors check mode (logs in but skips the sync call) and reports the
    elapsed time in seconds via exit_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(type='str', default='127.0.0.1'),
            port=dict(type='int'),
            username=dict(type='str', default='cobbler'),
            password=dict(type='str', no_log=True),
            use_ssl=dict(type='bool', default=True),
            validate_certs=dict(type='bool', default=True),
        ),
        supports_check_mode=True,
    )

    username = module.params['username']
    password = module.params['password']
    port = module.params['port']
    use_ssl = module.params['use_ssl']
    validate_certs = module.params['validate_certs']

    module.params['proto'] = 'https' if use_ssl else 'http'
    if not port:
        module.params['port'] = '443' if use_ssl else '80'

    result = dict(
        changed=True,
    )

    start = datetime.datetime.utcnow()

    ssl_context = None
    if not validate_certs:
        try:
            # Python 2.7.9+ / 3.x: build a context that skips verification.
            # FIX: this used to call the non-existent public name
            # ssl.create_unverified_context(), which always raised
            # AttributeError and then patched the wrong ssl attribute, so
            # certificate validation was never actually disabled.
            ssl_context = ssl._create_unverified_context()
        except AttributeError:
            # Python 2.7.8 and older never verified HTTPS certificates,
            # so there is nothing to disable.
            pass

    url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)

    if ssl_context:
        conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
    else:
        conn = xmlrpc_client.Server(url)

    try:
        token = conn.login(username, password)
    except xmlrpc_client.Fault as e:
        module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
    except Exception as e:
        module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))

    if not module.check_mode:
        try:
            conn.sync(token)
        except Exception as e:
            module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))

    elapsed = datetime.datetime.utcnow() - start
    module.exit_json(elapsed=elapsed.seconds, **result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
kawamon/hue | desktop/core/ext-py/cx_Oracle-6.4.1/samples/DMLReturningMultipleRows.py | 2 | 1743 | #------------------------------------------------------------------------------
# Copyright 2017, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# DMLReturningMultipleRows.py
# This script demonstrates the use of DML returning with multiple rows being
# returned at once.
#
# This script requires cx_Oracle 6.0 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import datetime
import SampleEnv
# truncate table first so that script can be rerun
connection = cx_Oracle.Connection(SampleEnv.MAIN_CONNECT_STRING)
cursor = connection.cursor()
print("Truncating table...")
cursor.execute("truncate table TestTempTable")

# populate table with a few rows
for i in range(5):
    data = (i + 1, "Test String #%d" % (i + 1))
    print("Adding row", data)
    cursor.execute("insert into TestTempTable values (:1, :2)", data)

# now delete them and use DML returning to return the data that was inserted
# intCol/stringCol are output bind variables; because the DELETE affects
# multiple rows, each variable's .values attribute holds one entry per
# deleted row.
intCol = cursor.var(int)
stringCol = cursor.var(str)
print("Deleting data with DML returning...")
cursor.execute("""
        delete from TestTempTable
        returning IntCol, StringCol into :intCol, :stringCol""",
        intCol = intCol,
        stringCol = stringCol)
print("Data returned:")
for intVal, stringVal in zip(intCol.values, stringCol.values):
    print(tuple([intVal, stringVal]))
| apache-2.0 |
paran0ids0ul/infernal-twin | build/pip/pip/_vendor/colorama/win32.py | 446 | 5121 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        # Field order and types must match the Win32 layout exactly --
        # this struct is filled in-place by GetConsoleScreenBufferInfo.
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Debug-friendly flat rendering of every field (Y before X).
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the console cursor to *position*, a 1-based ANSI (row, col)
        pair; when *adjust* is true, offset by the viewport scroll origin."""
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        # 1. being 0-based, while ANSI is 1-based.
        # 2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # Delegates to the ANSI SetConsoleTitleA binding declared above
        # (under the misleading name _SetConsoleTitleW); *title* is
        # expected to be a byte string (LPCSTR).
        return _SetConsoleTitleW(title)
| gpl-3.0 |
jhawkesworth/ansible | lib/ansible/modules/cloud/ovirt/ovirt_group_facts.py | 55 | 3393 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_group_facts
short_description: Retrieve facts about one or more oVirt/RHV groups
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt/RHV groups."
notes:
- "This module creates a new top-level C(ovirt_groups) fact, which
contains a list of groups."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search group X use following pattern: name=X"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all groups which names start with C(admin):
- ovirt_group_facts:
pattern: name=admin*
- debug:
var: ovirt_groups
'''
RETURN = '''
ovirt_groups:
description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
all groups attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/group."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
    """Module entry point: gather oVirt/RHV group facts and exit.

    Exits via module.exit_json() with the `ovirt_groups` fact on success,
    or module.fail_json() with the exception message on any error.
    """
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)
    # Pre-initialize so the `finally` block below is safe even when the
    # connection (or the 'auth' pop) fails: the old code raised NameError
    # in that case, masking the real error.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        groups_service = connection.system_service().groups_service()
        # Server-side search using the oVirt search-backend syntax.
        groups = groups_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_groups=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in groups
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if the caller did not supply a pre-existing token.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
DirtyUnicorns/android_kernel_samsung_smdk4412 | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian 32-bit words from stdin and print them as
# space-separated "index=value" pairs (index rendered in hex).
index = 0
while True:
    chunk = sys.stdin.read(4)
    if not chunk:
        # Clean EOF on a 4-byte boundary: done.
        break
    if len(chunk) != 4:
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(chunk)))
        sys.exit(1)
    if index:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, struct.unpack("<I", chunk)[0]))
    index += 1
sys.stdout.write("\n")
| gpl-2.0 |
hackathon-3d/ice-cream-manwich-repo | server/src/BooksWithFriends/libs/django/core/mail/backends/console.py | 97 | 1135 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """Email backend that writes messages to a stream instead of sending.

    The target stream defaults to sys.stdout and can be overridden with
    the ``stream`` keyword argument.
    """
    def __init__(self, *args, **kwargs):
        self.stream = kwargs.pop('stream', sys.stdout)
        # RLock: send_messages may be called from multiple threads.
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)
    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way.

        Returns the number of messages, or None when there is nothing to
        send. Errors are swallowed when ``fail_silently`` is set.
        """
        if not email_messages:
            return
        with self._lock:
            try:
                stream_created = self.open()
                for message in email_messages:
                    self.stream.write('%s\n' % message.message().as_string())
                    self.stream.write('-'*79)
                    self.stream.write('\n')
                    self.stream.flush()  # flush after each message
                if stream_created:
                    self.close()
            except Exception:
                # Narrowed from a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt when fail_silently was set.
                if not self.fail_silently:
                    raise
        return len(email_messages)
| gpl-2.0 |
ondra-novak/chromium.src | native_client_sdk/src/tools/fix_manifest.py | 44 | 3656 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Disable the lint error for too-long lines for the URL below.
# pylint: disable=C0301
"""Fix Chrome App manifest.json files for use with multi-platform zip files.
See info about multi-platform zip files here:
https://developer.chrome.com/native-client/devguide/distributing#packaged-application
The manifest.json file needs to point to the correct platform-specific paths,
but we build all toolchains and configurations in the same tree. As a result,
we can't have one manifest.json for all combinations.
Instead, we update the top-level manifest.json file during the build:
"platforms": [
{
"nacl_arch": "x86-64",
"sub_package_path": "_platform_specific/x86-64/"
},
...
Becomes
"platforms": [
{
"nacl_arch": "x86-64",
"sub_package_path": "<toolchain>/<config>/_platform_specific/x86-64/"
},
...
"""
import collections
import json
import optparse
import os
import sys
# Bail out early on interpreters older than 2.6 (needed for the json module
# used below). Fixes the grammar of the error message ("required run").
if sys.version_info < (2, 6, 0):
  sys.stderr.write("python 2.6 or later is required to run this script\n")
  sys.exit(1)
class Error(Exception):
  """Local error type for this script (raised for malformed manifests)."""
def Trace(msg):
  """Write *msg* to stderr, but only when verbose tracing is enabled."""
  if Trace.verbose:
    sys.stderr.write('%s\n' % msg)

# Tracing is off by default; main() flips this when -v is passed.
Trace.verbose = False
def main(argv):
  """Rewrite every platforms[*].sub_package_path in a manifest.json.

  Each path is re-rooted at "_platform_specific/..." and optionally given
  a new --prefix. Returns 0 on success; raises Error when the manifest
  lacks the expected "platforms" structure.
  """
  parser = optparse.OptionParser(
      usage='Usage: %prog [options] manifest.json', description=__doc__)
  parser.add_option('-p', '--prefix',
                    help='Prefix to set for all sub_package_paths in the '
                         'manifest. If none is specified, the prefix will be '
                         'removed; i.e. the start of the path will be '
                         '"_platform_specific/..."')
  parser.add_option('-v', '--verbose',
                    help='Verbose output', action='store_true')
  options, args = parser.parse_args(argv)
  if options.verbose:
    Trace.verbose = True
  if not args:
    parser.error('Expected manifest file.')
  manifest = args[0]
  Trace('Reading %s' % manifest)
  with open(manifest) as f:
    # Keep the dictionary order. This is only supported on Python 2.7+
    if sys.version_info >= (2, 7, 0):
      data = json.load(f, object_pairs_hook=collections.OrderedDict)
    else:
      data = json.load(f)
  if 'platforms' not in data:
    raise Error('%s does not have "platforms" key.' % manifest)
  platforms = data['platforms']
  if type(platforms) is not list:
    raise Error('Expected "platforms" key to be array.')
  if options.prefix:
    prefix = options.prefix + '/'
  else:
    prefix = ''
  for platform in platforms:
    nacl_arch = platform.get('nacl_arch')
    if 'sub_package_path' not in platform:
      raise Error('Expected each platform to have "sub_package_path" key.')
    sub_package_path = platform['sub_package_path']
    # Re-root the path at the "_platform_specific" component.
    index = sub_package_path.find('_platform_specific')
    if index == -1:
      raise Error('Could not find "_platform_specific" in the '
                  '"sub_package_path" key.')
    new_path = prefix + sub_package_path[index:]
    platform['sub_package_path'] = new_path
    Trace('  %s: "%s" -> "%s"' % (nacl_arch, sub_package_path, new_path))
  # Rewrite the manifest in place.
  with open(manifest, 'w') as f:
    json.dump(data, f, indent=2)
  return 0
# Script entry point: map Error and Ctrl-C to exit code 1.
if __name__ == '__main__':
  try:
    rtn = main(sys.argv[1:])
  except Error, e:  # Python 2 'except' syntax; this script targets Python 2.
    sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
    rtn = 1
  except KeyboardInterrupt:
    sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
    rtn = 1
  sys.exit(rtn)
| bsd-3-clause |
Matt-Deacalion/django | django/db/backends/sqlite3/base.py | 323 | 18115 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import warnings
from django.conf import settings
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.safestring import SafeBytes
# pytz is optional: it is only used when a time-zone name is passed to the
# django_datetime_* SQL functions (see _sqlite_datetime_parse below).
try:
    import pytz
except ImportError:
    pytz = None
# Prefer the standalone pysqlite2 binding; fall back to the stdlib sqlite3
# module. If neither is importable, configuration is unusable.
try:
    try:
        from pysqlite2 import dbapi2 as Database
    except ImportError:
        from sqlite3 import dbapi2 as Database
except ImportError as exc:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
# Some of these import sqlite3, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
# Re-export the DB-API exception classes under the names the rest of the
# backend machinery expects.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def adapt_datetime_warn_on_aware_datetime(value):
    """Adapt a datetime for SQLite storage, warning on aware values.

    Remove this function and rely on the default adapter in Django 2.0.
    """
    if settings.USE_TZ and timezone.is_aware(value):
        # Bug fix: warnings.warn() performs no %-formatting, so the old
        # message printed a literal "(%s)"; interpolate the value here.
        warnings.warn(
            "The SQLite database adapter received an aware datetime (%s), "
            "probably from cursor.execute(). Update your code to pass a "
            "naive datetime in the database connection's time zone (UTC by "
            "default)." % value, RemovedInDjango20Warning)
        # This doesn't account for the database connection's timezone,
        # which isn't known. (That's why this adapter is deprecated.)
        value = value.astimezone(timezone.utc).replace(tzinfo=None)
    return value.isoformat(str(" "))
def decoder(conv_func):
    """Wrap *conv_func* so it receives text rather than raw bytes.

    The sqlite3 layer always hands converter functions byte strings;
    decode them as UTF-8 before delegating to the real conversion.
    """
    def _decode_then_convert(raw):
        return conv_func(raw.decode('utf-8'))
    return _decode_then_convert
# Converters: turn values read from SQLite (byte strings at this layer,
# hence decoder()) into the matching Python types, keyed by declared type.
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime))
Database.register_converter(str("timestamp"), decoder(parse_datetime))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
# Adapters: translate Python values into SQLite-storable representations.
Database.register_adapter(datetime.datetime, adapt_datetime_warn_on_aware_datetime)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
    # On Python 2, byte strings are stored as UTF-8-decoded unicode.
    Database.register_adapter(str, lambda s: s.decode('utf-8'))
    Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseWrapper(BaseDatabaseWrapper):
    """SQLite implementation of Django's database wrapper."""
    vendor = 'sqlite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    # Expose the DB-API module and schema editor on the class.
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def get_connection_params(self):
        """Build the kwargs dict passed to Database.connect()."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a connection and install the custom SQL functions Django
        emits (django_date_*, django_datetime_*, regexp, power, ...)."""
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        return conn
    def init_connection_state(self):
        # No per-connection state to initialize for SQLite.
        pass
    def create_cursor(self):
        """Return a cursor that rewrites %s placeholders to qmark style."""
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db(self.settings_dict['NAME']):
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        """Toggle autocommit by flipping sqlite3's isolation_level."""
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Find rows whose FK value has no match in the referenced table.
                cursor.execute("""
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
                    % (primary_key_column_name, column_name, table_name, referenced_table_name,
                    column_name, referenced_column_name, column_name, referenced_column_name))
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
                        "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
                        % (table_name, bad_row[0], table_name, column_name, bad_row[1],
                        referenced_table_name, referenced_column_name))
    def is_usable(self):
        # SQLite connections have no server to lose; always usable.
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self, name):
        """Return True if *name* designates an in-memory SQLite database."""
        return name == ":memory:" or "mode=memory" in force_text(name)
# Matches a lone %s placeholder; a preceding % (i.e. %%s) escapes it.
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # Only rewrite placeholders when params are supplied; a
        # parameterless query may legitimately contain '%' characters.
        if params is None:
            return Database.Cursor.execute(self, query)
        query = self.convert_query(query)
        return Database.Cursor.execute(self, query, params)
    def executemany(self, query, param_list):
        query = self.convert_query(query)
        return Database.Cursor.executemany(self, query, param_list)
    def convert_query(self, query):
        # %s -> ? for the qmark paramstyle, then unescape literal %%.
        return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Custom SQL function: truncate a raw date value to year/month/day."""
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01" % parsed.year
    if lookup_type == 'month':
        return "%i-%02i-01" % (parsed.year, parsed.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i" % (parsed.year, parsed.month, parsed.day)
    # Unknown granularity: fall through, yielding SQL NULL.
def _sqlite_datetime_parse(dt, tzname):
    # Shared helper for the django_datetime_* SQL functions: parse the raw
    # column value and, when a zone name is supplied, localize to it.
    if dt is None:
        return None
    try:
        dt = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is not None:
        # NOTE(review): pytz may be None (optional import above); this path
        # presumably only runs when time-zone support is enabled -- confirm.
        dt = timezone.localtime(dt, pytz.timezone(tzname))
    return dt
def _sqlite_datetime_cast_date(dt, tzname):
    """Custom SQL function: cast a raw datetime to an ISO date string."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Custom SQL function: extract *lookup_type* from a raw datetime."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday=1 .. Saturday=7.
        return (parsed.isoweekday() % 7) + 1
    return getattr(parsed, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Custom SQL function: truncate a raw datetime to the given unit,
    returning a "%Y-%m-%d %H:%M:%S" string (or None / SQL NULL)."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % parsed.year
    if lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (parsed.year, parsed.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (parsed.year, parsed.month, parsed.day)
    if lookup_type == 'hour':
        return "%i-%02i-%02i %02i:00:00" % (parsed.year, parsed.month, parsed.day, parsed.hour)
    if lookup_type == 'minute':
        return "%i-%02i-%02i %02i:%02i:00" % (parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute)
    if lookup_type == 'second':
        return "%i-%02i-%02i %02i:%02i:%02i" % (parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute, parsed.second)
    # Unknown granularity: fall through, yielding SQL NULL.
def _sqlite_time_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_time(dt)
except (ValueError, TypeError):
return None
return getattr(dt, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime
    """
    try:
        if isinstance(lhs, six.integer_types):
            # Microseconds -> fractional seconds, as a string that
            # parse_duration() accepts.
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration string: treat it as a date/datetime instead.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, six.integer_types):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        # conn carries the SQL operator: '+' adds, anything else subtracts.
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| bsd-3-clause |
wscullin/spack | var/spack/repos/builtin/packages/xorg-cf-files/package.py | 3 | 1852 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XorgCfFiles(AutotoolsPackage):
    """The xorg-cf-files package contains the data files for the imake utility,
    defining the known settings for a wide variety of platforms (many of which
    have not been verified or tested in over a decade), and for many of the
    libraries formerly delivered in the X.Org monolithic releases."""
    homepage = "http://cgit.freedesktop.org/xorg/util/cf"
    url = "https://www.x.org/archive/individual/util/xorg-cf-files-1.0.6.tar.gz"
    # Second argument is the release tarball's MD5 checksum.
    version('1.0.6', 'c0ce98377c70d95fb48e1bd856109bf8')
    # Build-time-only dependency (type='build').
    depends_on('pkg-config@0.9.0:', type='build')
| lgpl-2.1 |
heeraj123/oh-mainline | vendor/packages/whoosh/src/whoosh/qparser/__init__.py | 117 | 1640 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.qparser.default import *
from whoosh.qparser.plugins import *
from whoosh.qparser.syntax import *
| agpl-3.0 |
naphthalene/fabric-bolt | fabric_bolt/projects/migrations/0009_load_some_data_yo.py | 15 | 5122 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    # Seed data migration: creates a demo "Fabric Bolt" project together
    # with four stages and three sample configuration entries.

    def forwards(self, orm):
        "Write your forwards methods here."
        # Note: Don't use "from appname.models import ModelName".
        # Use orm.ModelName to refer to models in this application,
        # and orm['appname.ModelName'] for models in other applications.
        # NOTE(review): assumes at least one ProjectType row already exists
        # (IndexError otherwise) -- presumably created by an earlier
        # migration; verify before squashing.
        project_type = orm.ProjectType.objects.all()[0]
        project = orm.Project()
        project.project_type = project_type
        project.name = 'Fabric Bolt'
        project.description = 'Some awesome info on this project'
        project.save()

        # One Stage row per name, all attached to the demo project.
        stages = ['Production', 'Testing', 'Develop', 'Database']
        for stage in stages:
            s = orm.Stage()
            s.project = project
            s.name = stage
            s.save()

        # Project-wide key/value configuration samples (no stage assigned,
        # so Configuration.stage stays NULL).
        configs = [
            {
                'key': 'port',
                'value': '8015',
            }, {
                'key': 'ip',
                'value': '127.0.0.1',
            }, {
                'key': 'server_name',
                'value': 'example.com',
            }
        ]

        for config in configs:
            c = orm.Configuration()
            c.project = project
            c.key = config['key']
            c.value = config['value']
            c.save()

    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: reversing this migration leaves the seeded
        # rows in place.

    # Frozen ORM snapshot used by South to build the `orm` object above.
    # Do not edit by hand; it must mirror the schema at migration 0009.
    models = {
        u'projects.configuration': {
            'Meta': {'object_name': 'Configuration'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
            'stage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Stage']", 'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        u'projects.deployment': {
            'Meta': {'object_name': 'Deployment'},
            'comments': ('django.db.models.fields.TextField', [], {}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'output': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'stage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Stage']"}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'projects.project': {
            'Meta': {'object_name': 'Project'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'number_of_deployments': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectType']", 'null': 'True', 'blank': 'True'})
        },
        u'projects.projecttype': {
            'Meta': {'object_name': 'ProjectType'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'projects.stage': {
            'Meta': {'object_name': 'Stage'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"})
        }
    }

    complete_apps = ['projects']
    symmetrical = True
| mit |
Zeken/audacity | lib-src/lv2/lv2/waflib/Tools/flex.py | 314 | 1057 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import waflib.TaskGen,os,re
def decide_ext(self, node):
    # Chain-rule decider: emit a C++ scanner name when the task generator
    # builds with the 'cxx' feature, otherwise a plain C scanner name.
    if 'cxx' in self.features:
        return ['.lex.cc']
    return ['.lex.c']
def flexfun(tsk):
    """Run flex on the task's inputs and write its stdout to the output node,
    normalizing line endings to '\n'."""
    env = tsk.env
    bld = tsk.generator.bld
    wd = bld.variant_dir

    def as_list(value):
        return [value] if isinstance(value, str) else value

    cmd = []
    # NOTE: last_cmd keeps a reference to this (unfiltered) list; the
    # filtered copy built below is what actually gets executed.
    tsk.last_cmd = cmd
    cmd.extend(as_list(env['FLEX']))
    cmd.extend(as_list(env['FLEXFLAGS']))
    srcs = [node.path_from(bld.bldnode) for node in tsk.inputs]
    if env.FLEX_MSYS:
        # The MSYS build of flex wants forward-slash paths.
        srcs = [s.replace(os.sep, '/') for s in srcs]
    cmd.extend(srcs)
    cmd = [part for part in cmd if part]
    txt = bld.cmd_and_log(cmd, cwd=wd, env=env.env or None, quiet=0)
    tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n'))
# Register the .l chain rule: flexfun runs flex on each .l source and
# decide_ext picks the generated extension (.lex.c or .lex.cc).
waflib.TaskGen.declare_chain(name='flex',rule=flexfun,ext_in='.l',decider=decide_ext,)
def configure(conf):
    """Detect the flex program and set default flags.

    Defines FLEX, FLEXFLAGS (-t writes the scanner to stdout so flexfun can
    capture it) and FLEX_MSYS when the detected binary is the MSYS build.
    """
    conf.find_program('flex', var='FLEX')
    conf.env.FLEXFLAGS = ['-t']
    flex_path = conf.env.FLEX
    if re.search(r"\\msys\\[0-9.]+\\bin\\flex.exe$", flex_path):
        # The MSYS flex needs POSIX-style input paths (handled in flexfun).
        conf.env.FLEX_MSYS = True
| gpl-2.0 |
davelab6/nototools | nototools/subset.py | 8 | 2851 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for subsetting fonts."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import subset
import coverage
def subset_font(source_file, target_file,
                include=None, exclude=None, options=None):
    """Subsets a font file.

    Subsets a font file based on a specified character set. If only include is
    specified, only characters from that set would be included in the output
    font. If only exclude is specified, all characters except those in that
    set will be included. If neither is specified, the character set will
    remain the same, but inaccessible glyphs will be removed.

    Args:
      source_file: Input file name.
      target_file: Output file name
      include: The list of characters to include from the source font.
      exclude: The list of characters to exclude from the source font.
      options: A dictionary listing which options should be different from the
          default.

    Raises:
      NotImplementedError: Both include and exclude were specified.
    """
    opt = subset.Options()
    # Keep all name records, languages and layout features so the subset
    # differs from the source only in character coverage.
    opt.name_IDs = ['*']
    opt.name_legacy = True
    opt.name_languages = ['*']
    opt.layout_features = ['*']
    opt.notdef_outline = True
    opt.recalc_bounds = True
    opt.recalc_timestamp = True
    opt.canonical_order = True

    if options is not None:
        # .items() instead of the Python-2-only .iteritems(): equivalent on
        # Python 2 and keeps the module importable on Python 3.
        for name, value in options.items():
            setattr(opt, name, value)

    if include is not None:
        if exclude is not None:
            raise NotImplementedError(
                'Subset cannot include and exclude a set at the same time.')
        target_charset = include
    else:
        if exclude is None:
            exclude = []
        source_charset = coverage.character_set(source_file)
        target_charset = source_charset - set(exclude)

    font = subset.load_font(source_file, opt)
    subsetter = subset.Subsetter(options=opt)
    subsetter.populate(unicodes=target_charset)
    subsetter.subset(font)
    subset.save_font(font, target_file, opt)
def main(argv):
    """Subset the first argument to second, dropping unused parts of the font.
    """
    source, target = argv[1], argv[2]
    subset_font(source, target)
if __name__ == '__main__':
    # Usage: subset.py <source-font> <target-font>
    main(sys.argv)
| apache-2.0 |
kubernetes-client/python | kubernetes/client/models/v1_lease_list.py | 1 | 6788 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1LeaseList(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string; to_dict() relies on
    # this to know which attributes to serialize.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Lease]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # Maps python attribute name -> JSON key used in the API document.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1LeaseList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # Optional fields are only assigned when provided; `items` is
        # required and always goes through its validating setter.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1LeaseList.  # noqa: E501

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :return: The api_version of this V1LeaseList.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1LeaseList.

        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501

        :param api_version: The api_version of this V1LeaseList.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1LeaseList.  # noqa: E501

        Items is a list of schema objects.  # noqa: E501

        :return: The items of this V1LeaseList.  # noqa: E501
        :rtype: list[V1Lease]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1LeaseList.

        Items is a list of schema objects.  # noqa: E501

        :param items: The items of this V1LeaseList.  # noqa: E501
        :type: list[V1Lease]
        """
        # `items` is the only required field; reject None when client-side
        # validation is enabled in the configuration.
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501

        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1LeaseList.  # noqa: E501

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :return: The kind of this V1LeaseList.  # noqa: E501
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1LeaseList.

        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501

        :param kind: The kind of this V1LeaseList.  # noqa: E501
        :type: str
        """

        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1LeaseList.  # noqa: E501


        :return: The metadata of this V1LeaseList.  # noqa: E501
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1LeaseList.


        :param metadata: The metadata of this V1LeaseList.  # noqa: E501
        :type: V1ListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models, and dicts of
        # models via their own to_dict(); plain values pass through as-is.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1LeaseList):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1LeaseList):
            return True

        return self.to_dict() != other.to_dict()
| apache-2.0 |
RockySteveJobs/python-for-android | python3-alpha/python3-src/Lib/test/test_winsound.py | 60 | 9242 | # Ridiculously simple test of the winsound module for Windows.
import unittest
from test import support
support.requires('audio')
import time
import os
import subprocess
winsound = support.import_module('winsound')
ctypes = support.import_module('ctypes')
import winreg
def has_sound(sound):
    """Find out if a particular event is configured with a default sound.

    Returns True only when the mixer reports at least one audio device and
    the registry holds a non-empty default sound for *sound* (an event name
    such as "SystemAsterisk"); False otherwise, including on registry errors.
    """
    try:
        # Ask the mixer API for the number of devices it knows about.
        # When there are no devices, PlaySound will fail.
        # (Fixed: was `is 0` -- identity comparison with an int literal only
        # worked by accident via CPython's small-int cache.)
        if ctypes.windll.winmm.mixerGetNumDevs() == 0:
            return False
        # Raw string: the registry path contains backslashes that must not
        # be interpreted as escape sequences.
        key = winreg.OpenKeyEx(
            winreg.HKEY_CURRENT_USER,
            r"AppEvents\Schemes\Apps\.Default\{0}\.Default".format(sound))
        value = winreg.EnumValue(key, 0)[1]
        # Fixed: was `value is not ""` -- an identity test that is almost
        # always True; the intent is "non-empty value configured".
        return value != ""
    except WindowsError:
        return False
class BeepTest(unittest.TestCase):
    # As with PlaySoundTest, incorporate the _have_soundcard() check
    # into our test methods.  If there's no audio device present,
    # winsound.Beep returns 0 and GetLastError() returns 127, which
    # is: ERROR_PROC_NOT_FOUND ("The specified procedure could not
    # be found"). (FWIW, virtual/Hyper-V systems fall under this
    # scenario as they have no sound devices whatsoever (not even
    # a legacy Beep device).)

    def test_errors(self):
        # Beep() requires two arguments; the frequency must lie in 37..32767.
        self.assertRaises(TypeError, winsound.Beep)
        self.assertRaises(ValueError, winsound.Beep, 36, 75)
        self.assertRaises(ValueError, winsound.Beep, 32768, 75)

    def test_extremes(self):
        # Lowest and highest legal frequencies.
        self._beep(37, 75)
        self._beep(32767, 75)

    def test_increasingfrequency(self):
        for i in range(100, 2000, 100):
            self._beep(i, 75)

    def _beep(self, *args):
        # these tests used to use _have_soundcard(), but it's quite
        # possible to have a soundcard, and yet have the beep driver
        # disabled. So basically, we have no way of knowing whether
        # a beep should be produced or not, so currently if these
        # tests fail we're ignoring them
        #
        # XXX the right fix for this is to define something like
        # _have_enabled_beep_driver() and use that instead of the
        # try/except below
        try:
            winsound.Beep(*args)
        except RuntimeError:
            pass
class MessageBeepTest(unittest.TestCase):
    # Exercises winsound.MessageBeep with each of the standard MB_* sounds.

    def tearDown(self):
        # Give the asynchronous beep time to play before the next test.
        time.sleep(0.5)

    def test_default(self):
        # MessageBeep takes at most one integer argument.
        self.assertRaises(TypeError, winsound.MessageBeep, "bad")
        self.assertRaises(TypeError, winsound.MessageBeep, 42, 42)
        winsound.MessageBeep()

    def test_ok(self):
        winsound.MessageBeep(winsound.MB_OK)

    def test_asterisk(self):
        winsound.MessageBeep(winsound.MB_ICONASTERISK)

    def test_exclamation(self):
        winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)

    def test_hand(self):
        winsound.MessageBeep(winsound.MB_ICONHAND)

    def test_question(self):
        winsound.MessageBeep(winsound.MB_ICONQUESTION)
class PlaySoundTest(unittest.TestCase):
    # Each alias test is skipped unless the event has a configured default
    # sound (has_sound), then branches on whether a soundcard is present
    # (_have_soundcard, defined later at module level).

    def test_errors(self):
        self.assertRaises(TypeError, winsound.PlaySound)
        self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad")
        # SND_MEMORY expects an in-memory wave image, which "none" is not.
        self.assertRaises(
            RuntimeError,
            winsound.PlaySound,
            "none", winsound.SND_ASYNC | winsound.SND_MEMORY
        )

    @unittest.skipUnless(has_sound("SystemAsterisk"),
                         "No default SystemAsterisk")
    def test_alias_asterisk(self):
        if _have_soundcard():
            winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemAsterisk', winsound.SND_ALIAS
            )

    @unittest.skipUnless(has_sound("SystemExclamation"),
                         "No default SystemExclamation")
    def test_alias_exclamation(self):
        if _have_soundcard():
            winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemExclamation', winsound.SND_ALIAS
            )

    @unittest.skipUnless(has_sound("SystemExit"), "No default SystemExit")
    def test_alias_exit(self):
        if _have_soundcard():
            winsound.PlaySound('SystemExit', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemExit', winsound.SND_ALIAS
            )

    @unittest.skipUnless(has_sound("SystemHand"), "No default SystemHand")
    def test_alias_hand(self):
        if _have_soundcard():
            winsound.PlaySound('SystemHand', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemHand', winsound.SND_ALIAS
            )

    @unittest.skipUnless(has_sound("SystemQuestion"),
                         "No default SystemQuestion")
    def test_alias_question(self):
        if _have_soundcard():
            winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS)
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                'SystemQuestion', winsound.SND_ALIAS
            )

    def test_alias_fallback(self):
        # This test can't be expected to work on all systems.  The MS
        # PlaySound() docs say:
        #
        #     If it cannot find the specified sound, PlaySound uses the
        #     default system event sound entry instead.  If the function
        #     can find neither the system default entry nor the default
        #     sound, it makes no sound and returns FALSE.
        #
        # It's known to return FALSE on some real systems.

        # winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS)
        return

    def test_alias_nofallback(self):
        if _have_soundcard():
            # Note that this is not the same as asserting RuntimeError
            # will get raised:  you cannot convert this to
            # self.assertRaises(...) form.  The attempt may or may not
            # raise RuntimeError, but it shouldn't raise anything other
            # than RuntimeError, and that's all we're trying to test
            # here.  The MS docs aren't clear about whether the SDK
            # PlaySound() with SND_ALIAS and SND_NODEFAULT will return
            # True or False when the alias is unknown.  On Tim's WinXP
            # box today, it returns True (no exception is raised).  What
            # we'd really like to test is that no sound is played, but
            # that requires first wiring an eardrum class into unittest
            # <wink>.
            try:
                winsound.PlaySound(
                    '!"$%&/(#+*',
                    winsound.SND_ALIAS | winsound.SND_NODEFAULT
                )
            except RuntimeError:
                pass
        else:
            self.assertRaises(
                RuntimeError,
                winsound.PlaySound,
                '!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT
            )

    def test_stopasync(self):
        if _have_soundcard():
            # Start a looping sound asynchronously ...
            winsound.PlaySound(
                'SystemQuestion',
                winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP
            )
            time.sleep(0.5)
            # ... verify SND_NOSTOP refuses to interrupt it ...
            try:
                winsound.PlaySound(
                    'SystemQuestion',
                    winsound.SND_ALIAS | winsound.SND_NOSTOP
                )
            except RuntimeError:
                pass
            else: # the first sound might already be finished
                pass
            # ... then stop it.
            winsound.PlaySound(None, winsound.SND_PURGE)
        else:
            # Issue 8367: PlaySound(None, winsound.SND_PURGE)
            # does not raise on systems without a sound card.
            pass
def _get_cscript_path():
"""Return the full path to cscript.exe or None."""
for dir in os.environ.get("PATH", "").split(os.pathsep):
cscript_path = os.path.join(dir, "cscript.exe")
if os.path.exists(cscript_path):
return cscript_path
# Cached result of _have_soundcard(); None means "not probed yet".
__have_soundcard_cache = None
def _have_soundcard():
    """Return True iff this computer has a soundcard."""
    global __have_soundcard_cache
    if __have_soundcard_cache is not None:
        return __have_soundcard_cache
    cscript_path = _get_cscript_path()
    if cscript_path is None:
        # Could not find cscript.exe to run our VBScript helper.  Default
        # to True (uncached): most computers these days *do* have a
        # soundcard.
        return True
    check_script = os.path.join(os.path.dirname(__file__),
                                "check_soundcard.vbs")
    proc = subprocess.Popen([cscript_path, check_script],
                            stdout=subprocess.PIPE)
    # A zero exit status from the helper means a soundcard was found.
    __have_soundcard_cache = not proc.wait()
    proc.stdout.close()
    return __have_soundcard_cache
def test_main():
    # regrtest entry point: run all three test case classes.
    support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest)
# Allow running this test file directly.
if __name__=="__main__":
    test_main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.